Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/video.c8
-rw-r--r--drivers/char/agp/intel-gtt.c62
-rw-r--r--drivers/gpu/drm/Kconfig19
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h1
-rw-r--r--drivers/gpu/drm/drm_cache.c28
-rw-r--r--drivers/gpu/drm/drm_crtc.c139
-rw-r--r--drivers/gpu/drm/drm_drv.c22
-rw-r--r--drivers/gpu/drm/drm_edid.c208
-rw-r--r--drivers/gpu/drm/drm_edid_load.c29
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h42
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c406
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c10
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c251
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c13
-rw-r--r--drivers/gpu/drm/gma500/Makefile5
-rw-r--r--drivers/gpu/drm/gma500/backlight.c45
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c72
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c236
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1950
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c12
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c7
-rw-r--r--drivers/gpu/drm/gma500/gem.c9
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.c90
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.h2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c101
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h46
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c13
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c1
-rw-r--r--drivers/gpu/drm/gma500/opregion.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h20
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h28
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c13
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h197
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c16
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/dvo.h16
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c17
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c23
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c588
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c20
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c18
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c251
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c73
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c61
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h239
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c1514
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c65
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c174
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c44
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c391
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c144
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c16
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c194
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h328
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c220
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h25
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c165
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c144
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2129
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c401
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h147
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c115
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c221
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c99
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c1
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c23
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c60
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c420
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c152
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h20
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c210
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c70
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c1
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig36
-rw-r--r--drivers/gpu/drm/nouveau/Makefile225
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c103
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engctx.c236
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engine.c55
-rw-r--r--drivers/gpu/drm/nouveau/core/core/enum.c (renamed from drivers/gpu/drm/nouveau/nouveau_util.c)47
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c318
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c223
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c (renamed from drivers/gpu/drm/nouveau/nouveau_mm.c)174
-rw-r--r--drivers/gpu/drm/nouveau/core/core/namedb.c203
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c468
-rw-r--r--drivers/gpu/drm/nouveau/core/core/option.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c139
-rw-r--r--drivers/gpu/drm/nouveau/core/core/printk.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ramht.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc (renamed from drivers/gpu/drm/nouveau/nva3_copy.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h (renamed from drivers/gpu/drm/nouveau/nva3_copy.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_copy.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c222
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c265
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c156
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc (renamed from drivers/gpu/drm/nouveau/nv98_crypt.fuc)2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h (renamed from drivers/gpu/drm/nouveau/nv98_crypt.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c217
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c208
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c90
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c125
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c118
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/vga.c215
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c185
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c173
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c181
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c630
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h178
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c171
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c208
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c349
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c502
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c420
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c647
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c628
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctx.h (renamed from drivers/gpu/drm/nouveau/nouveau_grctx.h)26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c (renamed from drivers/gpu/drm/nouveau/nv40_grctx.c)133
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c (renamed from drivers/gpu/drm/nouveau/nv50_grctx.c)561
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c3039
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c2788
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_grgpc.fuc)8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h)66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc451
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h530
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_grhub.fuc)8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h)89
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc780
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h857
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_graph.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc400
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c1387
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c1314
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c381
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv25.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c134
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv30.c238
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv34.c168
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv35.c166
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c495
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c888
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c955
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h171
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c576
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/regs.h269
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c308
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c240
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c147
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c199
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c181
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv84.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h118
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h42
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/debug.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h136
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engctx.h51
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engine.h57
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/enum.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/gpuobj.h71
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/math.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/namedb.h56
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h188
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/option.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h64
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/ramht.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/subdev.h118
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/bsp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/copy.h49
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/crypt.h46
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h57
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h111
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h72
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/mpeg.h61
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/ppp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h60
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/vp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h55
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h90
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h25
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h77
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h46
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h59
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/device.h24
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h134
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h64
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h60
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ibus.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/instmem.h73
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h49
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mxm.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h58
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h53
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vga.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h (renamed from drivers/gpu/drm/nouveau/nouveau_vm.h)87
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c263
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c215
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c479
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/bit.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/conn.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c100
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c2120
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/perf.c75
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pll.c417
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c177
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c359
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c (renamed from drivers/gpu/drm/nouveau/nouveau_ramht.h)56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c105
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c95
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pll.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c242
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c (renamed from drivers/gpu/drm/nouveau/nv50_calc.c)69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c472
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv04.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c195
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c147
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c375
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c410
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c285
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/base.c (renamed from drivers/gpu/drm/nouveau/nv98_ppp.c)69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h98
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c189
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c159
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c (renamed from drivers/gpu/drm/nouveau/nouveau_i2c.h)65
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c120
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c136
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c148
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c178
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c498
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c245
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c271
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c169
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c194
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c212
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c407
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c230
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c198
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c138
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c172
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c93
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c49
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c80
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c73
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c75
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c290
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c193
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c233
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c234
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c116
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c163
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h73
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c249
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c (renamed from drivers/gpu/drm/nouveau/nouveau_vm.c)163
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c151
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c158
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c248
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c (renamed from drivers/gpu/drm/nouveau/nv50_vm.c)118
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c (renamed from drivers/gpu/drm/nouveau/nvc0_vm.c)123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c426
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c152
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c94
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4567
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h178
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c439
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h99
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c235
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c396
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c219
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c196
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c259
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h94
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c280
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c693
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h144
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c512
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1655
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c226
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fifo.h32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c177
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.h71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpuobj.c807
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c435
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.h182
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c394
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c132
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c742
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mxm.c723
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c162
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c65
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c462
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h186
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c309
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c377
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_software.h56
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c1304
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c331
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c354
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.h49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c99
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c53
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c142
-rw-r--r--drivers/gpu/drm/nouveau/nv04_cursor.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c148
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c132
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c129
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.h184
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fb.c54
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c70
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c67
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c505
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c1325
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c192
-rw-r--r--drivers/gpu/drm/nouveau/nv04_mc.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c37
-rw-r--r--drivers/gpu/drm/nouveau/nv04_software.c147
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c103
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c103
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c137
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c123
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c1188
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fifo.c176
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c98
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv20_fb.c147
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c835
-rw-r--r--drivers/gpu/drm/nouveau/nv30_fb.c115
-rw-r--r--drivers/gpu/drm/nouveau/nv31_mpeg.c346
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c162
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c209
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c466
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c27
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c182
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c118
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c89
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c551
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h31
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c268
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c295
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c127
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c293
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c155
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c867
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c427
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mc.c39
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mpeg.c241
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c247
-rw-r--r--drivers/gpu/drm/nouveau/nv50_software.c203
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c133
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c237
-rw-r--r--drivers/gpu/drm/nouveau/nv84_bsp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c205
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c127
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fifo.c249
-rw-r--r--drivers/gpu/drm/nouveau/nv84_vp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.c216
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.c203
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c274
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c134
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c150
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c477
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c897
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h97
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c2878
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c223
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_software.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c504
-rw-r--r--drivers/gpu/drm/nouveau/nve0_fifo.c453
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.c831
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.h89
-rw-r--r--drivers/gpu/drm/nouveau/nve0_grctx.c2777
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c663
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c367
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c282
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c61
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h7
-rw-r--r--drivers/gpu/drm/radeon/ni.c134
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/r100.c96
-rw-r--r--drivers/gpu/drm/radeon/r300.c4
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c37
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c115
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c52
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h1
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h192
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c607
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.h445
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c93
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h28
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c411
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c602
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h49
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c104
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c49
-rw-r--r--drivers/gpu/drm/radeon/rs690.c6
-rw-r--r--drivers/gpu/drm/radeon/rv515.c18
-rw-r--r--drivers/gpu/drm/radeon/rv770.c10
-rw-r--r--drivers/gpu/drm/radeon/si.c113
-rw-r--r--drivers/gpu/drm/radeon/sid.h15
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig10
-rw-r--r--drivers/gpu/drm/shmobile/Makefile7
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.c90
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.h23
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c763
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.h60
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c361
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.h47
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c160
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.h34
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c268
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.h22
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_regs.h311
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c16
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c3
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c26
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c7
-rw-r--r--drivers/gpu/drm/udl/udl_main.c7
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1
-rw-r--r--drivers/staging/omapdrm/omap_connector.c5
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c4
552 files changed, 68605 insertions, 42692 deletions
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1e0a9e17c31d..f94d4c818fc7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1448,8 +1448,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 	case ACPI_VIDEO_NOTIFY_SWITCH:	/* User requested a switch,
 					 * most likely via hotkey. */
 		acpi_bus_generate_proc_event(device, event, 0);
-		if (!acpi_notifier_call_chain(device, event, 0))
-			keycode = KEY_SWITCHVIDEOMODE;
+		keycode = KEY_SWITCHVIDEOMODE;
 		break;
 
 	case ACPI_VIDEO_NOTIFY_PROBE:	/* User plugged in or removed a video
@@ -1479,8 +1478,9 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 		break;
 	}
 
-	if (event != ACPI_VIDEO_NOTIFY_SWITCH)
-		acpi_notifier_call_chain(device, event, 0);
+	if (acpi_notifier_call_chain(device, event, 0))
+		/* Something vetoed the keypress. */
+		keycode = 0;
 
 	if (keycode) {
 		input_report_key(input, keycode, 1);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 58e32f7c3229..e01f5eaaec82 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -84,40 +84,33 @@ static struct _intel_private {
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
-			 struct scatterlist **sg_list, int *num_sg)
+static int intel_gtt_map_memory(struct page **pages,
+				unsigned int num_entries,
+				struct sg_table *st)
 {
-	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (*sg_list)
-		return 0; /* already mapped (for e.g. resume */
-
 	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
 		goto err;
 
-	*sg_list = sg = st.sgl;
-
-	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+	for_each_sg(st->sgl, sg, num_entries, i)
 		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
-				 num_entries, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!*num_sg))
+	if (!pci_map_sg(intel_private.pcidev,
+			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
 		goto err;
 
 	return 0;
 
 err:
-	sg_free_table(&st);
+	sg_free_table(st);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(intel_gtt_map_memory);
 
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
 	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
@@ -130,7 +123,6 @@ void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 
 	sg_free_table(&st);
 }
-EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
@@ -674,9 +666,14 @@ static int intel_gtt_init(void)
 
 	gtt_map_size = intel_private.base.gtt_total_entries * 4;
 
-	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
-				    gtt_map_size);
-	if (!intel_private.gtt) {
+	intel_private.gtt = NULL;
+	if (INTEL_GTT_GEN < 6)
+		intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
+					       gtt_map_size);
+	if (intel_private.gtt == NULL)
+		intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+					    gtt_map_size);
+	if (intel_private.gtt == NULL) {
 		intel_private.driver->cleanup();
 		iounmap(intel_private.registers);
 		return -ENOMEM;
@@ -879,8 +876,7 @@ static bool i830_check_flags(unsigned int flags)
 	return false;
 }
 
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-				 unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags)
 {
@@ -892,12 +888,11 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 
 	/* sg may merge pages, but we have to separate
 	 * per-page addr for GTT */
-	for_each_sg(sg_list, sg, sg_len, i) {
+	for_each_sg(st->sgl, sg, st->nents, i) {
 		len = sg_dma_len(sg) >> PAGE_SHIFT;
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			intel_private.driver->write_entry(addr,
-							  j, flags);
+			intel_private.driver->write_entry(addr, j, flags);
 			j++;
 		}
 	}
@@ -905,8 +900,10 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 }
 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
-			    struct page **pages, unsigned int flags)
+static void intel_gtt_insert_pages(unsigned int first_entry,
+				   unsigned int num_entries,
+				   struct page **pages,
+				   unsigned int flags)
 {
 	int i, j;
 
@@ -917,7 +914,6 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
 	}
 	readl(intel_private.gtt+j-1);
 }
-EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
@@ -953,13 +949,15 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 		global_cache_flush();
 
 	if (intel_private.base.needs_dmar) {
-		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
-					   &mem->sg_list, &mem->num_sg);
+		struct sg_table st;
+
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
 		if (ret != 0)
 			return ret;
 
-		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
-					    pg_start, type);
+		intel_gtt_insert_sg_entries(&st, pg_start, type);
+		mem->sg_list = st.sgl;
+		mem->num_sg = st.nents;
 	} else
 		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
 				       type);
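
The interface change in this file is about ownership: intel_gtt_map_memory() now fills a caller-provided struct sg_table instead of handing back a raw scatterlist pointer plus an entry count, and the fake-AGP path copies sgl/nents back into agp_memory itself. A rough userspace sketch of the same caller-owned-table idea, using made-up names rather than the kernel scatterlist API:

#include <stdio.h>
#include <stdlib.h>

struct entry_table {              /* stand-in for struct sg_table */
	unsigned long *dma;       /* stand-in for per-segment DMA addresses */
	unsigned int nents;
};

/* fill a table the caller owns; cleanup stays with the caller */
static int map_pages(const unsigned long *pages, unsigned int n,
		     struct entry_table *st)
{
	unsigned int i;

	st->dma = calloc(n, sizeof(*st->dma));
	if (!st->dma)
		return -1;
	st->nents = n;
	for (i = 0; i < n; i++)
		st->dma[i] = pages[i] | 0x1000;   /* fake "mapping" */
	return 0;
}

static void unmap_pages(struct entry_table *st)
{
	free(st->dma);
	st->dma = NULL;
	st->nents = 0;
}

int main(void)
{
	unsigned long pages[] = { 0x10000, 0x20000, 0x30000 };
	struct entry_table st;    /* caller owns the table, as in the patch */
	unsigned int i;

	if (map_pages(pages, 3, &st))
		return 1;
	for (i = 0; i < st.nents; i++)
		printf("entry %u -> %#lx\n", i, st.dma[i]);
	unmap_pages(&st);
	return 0;
}
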
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 90e28081712d..18321b68b880 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,7 +22,7 @@ menuconfig DRM
 config DRM_USB
 	tristate
 	depends on DRM
-	depends on USB_ARCH_HAS_HCD
+	depends on USB_SUPPORT && USB_ARCH_HAS_HCD
 	select USB
 
 config DRM_KMS_HELPER
@@ -54,6 +54,21 @@ config DRM_TTM
 	  GPU memory types. Will be enabled automatically if a device driver
 	  uses it.
 
+config DRM_GEM_CMA_HELPER
+	bool
+	depends on DRM
+	help
+	  Choose this if you need the GEM CMA helper functions
+
+config DRM_KMS_CMA_HELPER
+	bool
+	select DRM_GEM_CMA_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	help
+	  Choose this if you need the KMS CMA helper functions
+
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
@@ -193,3 +208,5 @@ source "drivers/gpu/drm/ast/Kconfig"
 source "drivers/gpu/drm/mgag200/Kconfig"
 
 source "drivers/gpu/drm/cirrus/Kconfig"
+
+source "drivers/gpu/drm/shmobile/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f65f65ed0ddf..2ff5cefe9ead 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,11 +15,13 @@ drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_trace_points.o drm_global.o drm_prime.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 
 drm-usb-y   := drm_usb.o
 
 drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
@@ -45,4 +47,5 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index aea439760b60..5ccf984f063a 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -94,7 +94,6 @@ struct ast_private {
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 
 	struct drm_gem_object *cursor_cache;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a6982b86df9b..7fc9f7272b56 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -582,7 +582,6 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
 	.mode_set_base = ast_crtc_mode_set_base,
 	.disable = ast_crtc_disable,
 	.load_lut = ast_crtc_load_lut,
-	.disable = ast_crtc_disable,
 	.prepare = ast_crtc_prepare,
 	.commit = ast_crtc_commit,
 
@@ -737,6 +736,7 @@ static int ast_get_modes(struct drm_connector *connector)
 	if (edid) {
 		drm_mode_connector_update_edid_property(&ast_connector->base, edid);
 		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
 		return ret;
 	} else
 		drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 7f0d71ffba3f..6e0cc724e5a2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -143,7 +143,6 @@ struct cirrus_device {
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 	bool mm_inited;
 };
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index ec4698246213..a575cb2e6bdb 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -37,12 +37,13 @@ drm_clflush_page(struct page *page)
 {
 	uint8_t *page_virtual;
 	unsigned int i;
+	const int size = boot_cpu_data.x86_clflush_size;
 
 	if (unlikely(page == NULL))
 		return;
 
 	page_virtual = kmap_atomic(page);
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+	for (i = 0; i < PAGE_SIZE; i += size)
 		clflush(page_virtual + i);
 	kunmap_atomic(page_virtual);
 }
@@ -100,6 +101,31 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 EXPORT_SYMBOL(drm_clflush_pages);
 
 void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct scatterlist *sg;
+		int i;
+
+		mb();
+		for_each_sg(st->sgl, sg, st->nents, i)
+			drm_clflush_page(sg_page(sg));
+		mb();
+
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
 drm_clflush_virt_range(char *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 271ffa4fdb47..ef1b22144d37 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -293,6 +293,8 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 {
 	int ret;
 
+	kref_init(&fb->refcount);
+
 	ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
 	if (ret)
 		return ret;
@@ -306,6 +308,38 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL(drm_framebuffer_init);
 
+static void drm_framebuffer_free(struct kref *kref)
+{
+	struct drm_framebuffer *fb =
+			container_of(kref, struct drm_framebuffer, refcount);
+	fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	kref_put(&fb->refcount, drm_framebuffer_free);
+}
+EXPORT_SYMBOL(drm_framebuffer_unreference);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	kref_get(&fb->refcount);
+}
+EXPORT_SYMBOL(drm_framebuffer_reference);
+
 /**
  * drm_framebuffer_cleanup - remove a framebuffer object
  * @fb: framebuffer to remove
@@ -319,6 +353,32 @@ EXPORT_SYMBOL(drm_framebuffer_init);
 void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
+	/*
+	 * This could be moved to drm_framebuffer_remove(), but for
+	 * debugging is nice to keep around the list of fb's that are
+	 * no longer associated w/ a drm_file but are not unreferenced
+	 * yet.  (i915 and omapdrm have debugfs files which will show
+	 * this.)
+	 */
+	drm_mode_object_put(dev, &fb->base);
+	list_del(&fb->head);
+	dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config.  If they're
+ * using @fb, removes it, setting it to NULL.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	struct drm_mode_set set;
@@ -349,11 +409,11 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 		}
 	}
 
-	drm_mode_object_put(dev, &fb->base);
-	list_del(&fb->head);
-	dev->mode_config.num_fb--;
+	list_del(&fb->filp_head);
+
+	drm_framebuffer_unreference(fb);
 }
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
+EXPORT_SYMBOL(drm_framebuffer_remove);
 
 /**
  * drm_crtc_init - Initialise a new CRTC object
@@ -376,6 +436,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 
 	crtc->dev = dev;
 	crtc->funcs = funcs;
+	crtc->invert_dimensions = false;
 
 	mutex_lock(&dev->mode_config.mutex);
 
@@ -1030,11 +1091,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	}
 
 	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-		fb->funcs->destroy(fb);
-	}
-
-	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-		crtc->funcs->destroy(crtc);
+		drm_framebuffer_remove(fb);
 	}
 
 	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
@@ -1042,6 +1099,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 		plane->funcs->destroy(plane);
 	}
 
+	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+		crtc->funcs->destroy(crtc);
+	}
+
 	idr_remove_all(&dev->mode_config.crtc_idr);
 	idr_destroy(&dev->mode_config.crtc_idr);
 }
@@ -1851,6 +1912,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
+		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -1886,14 +1948,20 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		if (mode->hdisplay > fb->width ||
-		    mode->vdisplay > fb->height ||
-		    crtc_req->x > fb->width - mode->hdisplay ||
-		    crtc_req->y > fb->height - mode->vdisplay) {
-			DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n",
-				      mode->hdisplay, mode->vdisplay,
-				      crtc_req->x, crtc_req->y,
-				      fb->width, fb->height);
+		hdisplay = mode->hdisplay;
+		vdisplay = mode->vdisplay;
+
+		if (crtc->invert_dimensions)
+			swap(hdisplay, vdisplay);
+
+		if (hdisplay > fb->width ||
+		    vdisplay > fb->height ||
+		    crtc_req->x > fb->width - hdisplay ||
+		    crtc_req->y > fb->height - vdisplay) {
+			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+				      fb->width, fb->height,
+				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+				      crtc->invert_dimensions ? " (inverted)" : "");
 			ret = -ENOSPC;
 			goto out;
 		}
@@ -2168,6 +2236,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 	case DRM_FORMAT_YUV410:
 	case DRM_FORMAT_YVU410:
 	case DRM_FORMAT_YUV411:
@@ -2334,11 +2404,7 @@ int drm_mode_rmfb(struct drm_device *dev,
 		goto out;
 	}
 
-	/* TODO release all crtc connected to the framebuffer */
-	/* TODO unhock the destructor from the buffer object */
-
-	list_del(&fb->filp_head);
-	fb->funcs->destroy(fb);
+	drm_framebuffer_remove(fb);
 
 out:
 	mutex_unlock(&dev->mode_config.mutex);
@@ -2488,8 +2554,7 @@ void drm_fb_release(struct drm_file *priv)
 
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-		list_del(&fb->filp_head);
-		fb->funcs->destroy(fb);
+		drm_framebuffer_remove(fb);
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 }
@@ -3488,6 +3553,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_framebuffer *fb;
 	struct drm_pending_vblank_event *e = NULL;
 	unsigned long flags;
+	int hdisplay, vdisplay;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3517,14 +3583,19 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		goto out;
 	fb = obj_to_fb(obj);
 
-	if (crtc->mode.hdisplay > fb->width ||
-	    crtc->mode.vdisplay > fb->height ||
-	    crtc->x > fb->width - crtc->mode.hdisplay ||
-	    crtc->y > fb->height - crtc->mode.vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n",
-			      fb->width, fb->height,
-			      crtc->mode.hdisplay, crtc->mode.vdisplay,
-			      crtc->x, crtc->y);
+	hdisplay = crtc->mode.hdisplay;
+	vdisplay = crtc->mode.vdisplay;
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    crtc->x > fb->width - hdisplay ||
+	    crtc->y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
 		ret = -ENOSPC;
 		goto out;
 	}
@@ -3717,6 +3788,8 @@ int drm_format_num_planes(uint32_t format)
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 		return 2;
 	default:
 		return 1;
@@ -3750,6 +3823,8 @@ int drm_format_plane_cpp(uint32_t format, int plane)
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 		return plane ? 2 : 1;
 	case DRM_FORMAT_YUV410:
 	case DRM_FORMAT_YVU410:
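
The drm_crtc.c changes above split the old drm_framebuffer_cleanup() into cleanup plus a new drm_framebuffer_remove(), and hang framebuffer lifetime off a kref so ->destroy only runs when the last reference is dropped. A plain-C analogue of that reference-counting shape, assuming simplified names and a non-atomic counter (the kernel's kref is atomic, and the added kerneldoc requires the mode config lock around unreference):

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for struct kref + struct drm_framebuffer from the hunks above */
struct fake_fb {
	unsigned int refcount;
	int id;
	void (*destroy)(struct fake_fb *fb);
};

static void fb_destroy(struct fake_fb *fb)
{
	printf("destroying fb %d\n", fb->id);
	free(fb);
}

static void fb_reference(struct fake_fb *fb)
{
	fb->refcount++;                     /* kref_get() in the kernel */
}

static void fb_unreference(struct fake_fb *fb)
{
	if (--fb->refcount == 0)            /* kref_put() invokes the release fn */
		fb->destroy(fb);
}

int main(void)
{
	struct fake_fb *fb = malloc(sizeof(*fb));

	if (!fb)
		return 1;
	fb->refcount = 1;                   /* kref_init() in drm_framebuffer_init() */
	fb->id = 42;
	fb->destroy = fb_destroy;

	fb_reference(fb);                   /* e.g. a CRTC starts scanning out */
	fb_unreference(fb);                 /* rmfb drops the file's reference */
	fb_unreference(fb);                 /* last user gone -> destroy runs */
	return 0;
}

With this in place, drm_mode_rmfb() and drm_fb_release() go through drm_framebuffer_remove(), which detaches the fb from CRTCs and planes and drops a reference; the ->destroy callback fires only once the refcount reaches zero.
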
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c8fdf03f32c2..be174cab105a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -140,10 +140,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
@@ -152,19 +152,19 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a2e54769344a..5dda07cf7097 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -161,7 +161,7 @@ MODULE_PARM_DESC(edid_fixup,
  * Sanity check the EDID block (base or extension).  Return 0 if the block
  * doesn't check out, or 1 if it's valid.
  */
-bool drm_edid_block_valid(u8 *raw_edid, int block)
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
 {
 	int i;
 	u8 csum = 0;
@@ -184,7 +184,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
 	if (csum) {
-		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		if (print_bad_edid) {
+			DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		}
 
 		/* allow CEA to slide through, switches mangle this */
 		if (raw_edid[0] != 0x02)
@@ -210,7 +212,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
 	return 1;
 
 bad:
-	if (raw_edid) {
+	if (raw_edid && print_bad_edid) {
 		printk(KERN_ERR "Raw EDID:\n");
 		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
 			       raw_edid, EDID_LENGTH, false);
@@ -234,7 +236,7 @@ bool drm_edid_is_valid(struct edid *edid)
 		return false;
 
 	for (i = 0; i <= edid->extensions; i++)
-		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
+		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
 			return false;
 
 	return true;
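
For reference, the validity check being extended here is the standard EDID rule that each 128-byte block sums to zero modulo 256 (plus the fixed 8-byte header on the base block). A self-contained sketch of that checksum test, using the same EDID_LENGTH the kernel code uses:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EDID_LENGTH 128

/* returns 1 if the 128-byte block sums to zero (mod 256), as in the hunk above */
static int edid_block_checksum_ok(const uint8_t *block)
{
	uint8_t csum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH; i++)
		csum += block[i];
	return csum == 0;
}

int main(void)
{
	static const uint8_t header[8] =
		{ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	uint8_t block[EDID_LENGTH] = { 0 };
	uint8_t sum = 0;
	int i;

	memcpy(block, header, sizeof(header));

	/* fabricate a valid checksum byte for this toy block */
	for (i = 0; i < EDID_LENGTH - 1; i++)
		sum += block[i];
	block[EDID_LENGTH - 1] = (uint8_t)(0x100 - sum);

	printf("checksum ok: %d\n", edid_block_checksum_ok(block));
	return 0;
}
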
@@ -257,6 +259,8 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 		      int block, int len)
 {
 	unsigned char start = block * EDID_LENGTH;
+	unsigned char segment = block >> 1;
+	unsigned char xfers = segment ? 3 : 2;
 	int ret, retries = 5;
 
 	/* The core i2c driver will automatically retry the transfer if the
@@ -268,6 +272,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 	do {
 		struct i2c_msg msgs[] = {
 			{
+				.addr	= DDC_SEGMENT_ADDR,
+				.flags	= 0,
+				.len	= 1,
+				.buf	= &segment,
+			}, {
 				.addr	= DDC_ADDR,
 				.flags	= 0,
 				.len	= 1,
@@ -279,15 +288,21 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 				.buf	= buf,
 			}
 		};
-		ret = i2c_transfer(adapter, msgs, 2);
+
+	/*
+	 * Avoid sending the segment addr to not upset non-compliant ddc
+	 * monitors.
+	 */
+		ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
 		if (ret == -ENXIO) {
 			DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
 					adapter->name);
 			break;
 		}
-	} while (ret != 2 && --retries);
+	} while (ret != xfers && --retries);
 
-	return ret == 2 ? 0 : -1;
+	return ret == xfers ? 0 : -1;
 }
 
 static bool drm_edid_is_zero(u8 *in_edid, int length)
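
The new transfer logic follows the E-DDC segment scheme: blocks 0 and 1 live in segment 0 and need only the offset write plus the read, while higher blocks first need a segment-pointer write, which is why xfers becomes 3. A tiny standalone illustration of the block-to-segment/offset arithmetic used above (the offset byte deliberately wraps as an unsigned char):

#include <stdio.h>

#define EDID_LENGTH 128

int main(void)
{
	int block;

	for (block = 0; block < 4; block++) {
		/* same arithmetic as the hunk above */
		unsigned int offset = (unsigned char)(block * EDID_LENGTH);
		unsigned int segment = block >> 1;
		int xfers = segment ? 3 : 2;   /* extra write for the segment pointer */

		printf("block %d -> segment %u, offset %3u, %d i2c transfers\n",
		       block, segment, offset, xfers);
	}
	return 0;
}
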
@@ -306,6 +321,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
 	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
+	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
 		return NULL;
@@ -314,7 +330,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 	for (i = 0; i < 4; i++) {
 		if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
 			goto out;
-		if (drm_edid_block_valid(block, 0))
+		if (drm_edid_block_valid(block, 0, print_bad_edid))
 			break;
 		if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
 			connector->null_edid_counter++;
@@ -339,7 +355,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 				  block + (valid_extensions + 1) * EDID_LENGTH,
 				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
 				valid_extensions++;
 				break;
 			}
@@ -362,8 +378,11 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 	return block;
 
 carp:
-	dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
-		 drm_get_connector_name(connector), j);
+	if (print_bad_edid) {
+		dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+			 drm_get_connector_name(connector), j);
+	}
+	connector->bad_edid_counter++;
 
 out:
 	kfree(block);
@@ -402,10 +421,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 	if (drm_probe_ddc(adapter))
 		edid = (struct edid *)drm_do_get_edid(connector, adapter);
 
-	connector->display_info.raw_edid = (char *)edid;
-
 	return edid;
-
 }
 EXPORT_SYMBOL(drm_get_edid);
 
@@ -1523,16 +1539,57 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
 }
 
 static int
+cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+static int
+cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
+#define for_each_cea_db(cea, i, start, end) \
+	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static int
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
 	u8 * cea = drm_find_cea_extension(edid);
 	u8 * db, dbl;
 	int modes = 0;
 
-	if (cea && cea[1] >= 3) {
-		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
-			dbl = db[0] & 0x1f;
-			if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return 0;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			dbl = cea_db_payload_len(db);
+
+			if (cea_db_tag(db) == VIDEO_BLOCK)
 				modes += do_cea_modes (connector, db+1, dbl);
 		}
 	}
@@ -1541,19 +1598,28 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
 }
 
 static void
-parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
 {
-	connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
-
-	connector->dvi_dual = db[6] & 1;
-	connector->max_tmds_clock = db[7] * 5;
+	u8 len = cea_db_payload_len(db);
 
-	connector->latency_present[0] = db[8] >> 7;
-	connector->latency_present[1] = (db[8] >> 6) & 1;
-	connector->video_latency[0] = db[9];
-	connector->audio_latency[0] = db[10];
-	connector->video_latency[1] = db[11];
-	connector->audio_latency[1] = db[12];
+	if (len >= 6) {
+		connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
+		connector->dvi_dual = db[6] & 1;
+	}
+	if (len >= 7)
+		connector->max_tmds_clock = db[7] * 5;
+	if (len >= 8) {
+		connector->latency_present[0] = db[8] >> 7;
+		connector->latency_present[1] = (db[8] >> 6) & 1;
+	}
+	if (len >= 9)
+		connector->video_latency[0] = db[9];
+	if (len >= 10)
+		connector->audio_latency[0] = db[10];
+	if (len >= 11)
+		connector->video_latency[1] = db[11];
+	if (len >= 12)
+		connector->audio_latency[1] = db[12];
 
 	DRM_LOG_KMS("HDMI: DVI dual %d, "
 		    "max TMDS clock %d, "
@@ -1577,6 +1643,21 @@ monitor_name(struct detailed_timing *t, void *data)
 		*(u8 **)data = t->data.other_data.data.str.str;
 }
 
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 5)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IDENTIFIER;
+}
+
 /**
  * drm_edid_to_eld - build ELD from EDID
  * @connector: connector corresponding to the HDMI/DP sink
@@ -1623,29 +1704,40 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
 	eld[18] = edid->prod_code[0];
 	eld[19] = edid->prod_code[1];
 
-	if (cea[1] >= 3)
-		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
-			dbl = db[0] & 0x1f;
-			
-			switch ((db[0] & 0xe0) >> 5) {
+	if (cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end)) {
+			start = 0;
+			end = 0;
+		}
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			dbl = cea_db_payload_len(db);
+
+			switch (cea_db_tag(db)) {
 			case AUDIO_BLOCK:
 				/* Audio Data Block, contains SADs */
 				sad_count = dbl / 3;
-				memcpy(eld + 20 + mnl, &db[1], dbl);
+				if (dbl >= 1)
+					memcpy(eld + 20 + mnl, &db[1], dbl);
 				break;
 			case SPEAKER_BLOCK:
-                                /* Speaker Allocation Data Block */
-				eld[7] = db[1];
+				/* Speaker Allocation Data Block */
+				if (dbl >= 1)
+					eld[7] = db[1];
 				break;
 			case VENDOR_BLOCK:
 				/* HDMI Vendor-Specific Data Block */
-				if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+				if (cea_db_is_hdmi_vsdb(db))
 					parse_hdmi_vsdb(connector, db);
 				break;
 			default:
 				break;
 			}
 		}
+	}
 	eld[5] |= sad_count << 4;
 	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
 
@@ -1723,38 +1815,26 @@ EXPORT_SYMBOL(drm_select_eld);
 bool drm_detect_hdmi_monitor(struct edid *edid)
 {
 	u8 *edid_ext;
-	int i, hdmi_id;
+	int i;
 	int start_offset, end_offset;
-	bool is_hdmi = false;
 
 	edid_ext = drm_find_cea_extension(edid);
 	if (!edid_ext)
-		goto end;
+		return false;
 
-	/* Data block offset in CEA extension block */
-	start_offset = 4;
-	end_offset = edid_ext[2];
+	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+		return false;
 
 	/*
 	 * Because HDMI identifier is in Vendor Specific Block,
 	 * search it from all data blocks of CEA extension.
 	 */
-	for (i = start_offset; i < end_offset;
-		/* Increased by data block len */
-		i += ((edid_ext[i] & 0x1f) + 1)) {
-		/* Find vendor specific block */
-		if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
-			hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
-				  edid_ext[i + 3] << 16;
-			/* Find HDMI identifier */
-			if (hdmi_id == HDMI_IDENTIFIER)
-				is_hdmi = true;
-			break;
-		}
+	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+		if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+			return true;
 	}
 
-end:
-	return is_hdmi;
+	return false;
 }
 EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
@@ -1786,15 +1866,13 @@ bool drm_detect_monitor_audio(struct edid *edid)
 		goto end;
 	}
 
-	/* Data block offset in CEA extension block */
-	start_offset = 4;
-	end_offset = edid_ext[2];
+	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+		goto end;
 
-	for (i = start_offset; i < end_offset;
-			i += ((edid_ext[i] & 0x1f) + 1)) {
-		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+		if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
 			has_audio = true;
-			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+			for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
 				DRM_DEBUG_KMS("CEA audio format %d\n",
 					      (edid_ext[i + j] >> 3) & 0xf);
 			goto end;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9d53e6503f9a..38d3943f72de 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -114,8 +114,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
 	},
 };
 
-static int edid_load(struct drm_connector *connector, char *name,
-		     char *connector_name)
+static u8 *edid_load(struct drm_connector *connector, char *name,
+			char *connector_name)
 {
 	const struct firmware *fw;
 	struct platform_device *pdev;
@@ -123,6 +123,7 @@ static int edid_load(struct drm_connector *connector, char *name,
 	int fwsize, expected;
 	int builtin = 0, err = 0;
 	int i, valid_extensions = 0;
+	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
 	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
 	if (IS_ERR(pdev)) {
@@ -173,7 +174,8 @@ static int edid_load(struct drm_connector *connector, char *name,
 	}
 	memcpy(edid, fwdata, fwsize);
 
-	if (!drm_edid_block_valid(edid, 0)) {
+	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
+		connector->bad_edid_counter++;
 		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
 		    name);
 		kfree(edid);
@@ -185,7 +187,7 @@ static int edid_load(struct drm_connector *connector, char *name,
 		if (i != valid_extensions + 1)
 			memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
 			    edid + i * EDID_LENGTH, EDID_LENGTH);
-		if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
+		if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
 			valid_extensions++;
 	}
 
@@ -205,7 +207,6 @@ static int edid_load(struct drm_connector *connector, char *name,
 		edid = new_edid;
 	}
 
-	connector->display_info.raw_edid = edid;
 	DRM_INFO("Got %s EDID base block and %d extension%s from "
 	    "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
 	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
@@ -215,7 +216,10 @@ relfw_out:
 	release_firmware(fw);
 
 out:
-	return err;
+	if (err)
+		return ERR_PTR(err);
+
+	return edid;
 }
 
 int drm_load_edid_firmware(struct drm_connector *connector)
@@ -223,6 +227,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
 	char *connector_name = drm_get_connector_name(connector);
 	char *edidname = edid_firmware, *last, *colon;
 	int ret;
+	struct edid *edid;
 
 	if (*edidname == '\0')
 		return 0;
@@ -240,13 +245,13 @@ int drm_load_edid_firmware(struct drm_connector *connector)
 	if (*last == '\n')
 		*last = '\0';
 
-	ret = edid_load(connector, edidname, connector_name);
-	if (ret)
+	edid = (struct edid *) edid_load(connector, edidname, connector_name);
+	if (IS_ERR_OR_NULL(edid))
 		return 0;
 
-	drm_mode_connector_update_edid_property(connector,
-	    (struct edid *) connector->display_info.raw_edid);
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
 
-	return drm_add_edid_modes(connector, (struct edid *)
-	    connector->display_info.raw_edid);
+	return ret;
 }
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index fbd354c1f1f4..5dbf7d2557b4 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -89,7 +89,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
 		   976, 1088, 0, 480, 486, 494, 517, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 1024x768@43Hz, interlace */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
 		   1208, 1264, 0, 768, 768, 772, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -395,7 +395,7 @@ static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
 		   1184, 1344, 0,  768, 771, 777, 806, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
 		   1208, 1264, 0, 768, 768, 776, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
 	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
@@ -506,17 +506,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   1430, 1650, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 5 - 1920x1080i@60Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 6 - 1440x480i@60Hz */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 7 - 1440x480i@60Hz */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -531,12 +531,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK) },
 	/* 10 - 2880x480i@60Hz */
-	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 11 - 2880x480i@60Hz */
-	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
 		   3204, 3432, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -573,17 +573,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   1760, 1980, 0, 720, 725, 730, 750, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 20 - 1920x1080i@50Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 21 - 1440x576i@50Hz */
-	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 22 - 1440x576i@50Hz */
-	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -598,12 +598,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK) },
 	/* 25 - 2880x576i@50Hz */
-	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 26 - 2880x576i@50Hz */
-	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
 		   3180, 3456, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -656,12 +656,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   3184, 3456, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 39 - 1920x1080i@50Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
 		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
 	/* 40 - 1920x1080i@100Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
 		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -688,7 +688,7 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_DBLCLK) },
 	/* 46 - 1920x1080i@120Hz */
-	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
 		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 			DRM_MODE_FLAG_INTERLACE) },
@@ -705,12 +705,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 50 - 1440x480i@120Hz */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 51 - 1440x480i@120Hz */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -723,12 +723,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   796, 864, 0, 576, 581, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 54 - 1440x576i@200Hz */
-	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 55 - 1440x576i@200Hz */
-	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
 		   1590, 1728, 0, 576, 580, 586, 625, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -741,12 +741,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   798, 858, 0, 480, 489, 495, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
 	/* 58 - 1440x480i@240 */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
 	/* 59 - 1440x480i@240 */
-	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
 		   1602, 1716, 0, 480, 488, 494, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
 			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
new file mode 100644
index 000000000000..09e11a5d921a
--- /dev/null
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -0,0 +1,406 @@
+/*
+ * drm kms/fb cma (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Analog Devices Inc.
+ *   Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on udl_fbdev.c
+ *  Copyright (C) 2012 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <linux/module.h>
+
+struct drm_fb_cma {
+	struct drm_framebuffer		fb;
+	struct drm_gem_cma_object	*obj[4];
+};
+
+struct drm_fbdev_cma {
+	struct drm_fb_helper	fb_helper;
+	struct drm_fb_cma	*fb;
+};
+
+static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
+{
+	return container_of(helper, struct drm_fbdev_cma, fb_helper);
+}
+
+static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
+{
+	return container_of(fb, struct drm_fb_cma, fb);
+}
+
+static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (fb_cma->obj[i])
+			drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+	}
+
+	drm_framebuffer_cleanup(fb);
+	kfree(fb_cma);
+}
+
+static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+	struct drm_file *file_priv, unsigned int *handle)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+	return drm_gem_handle_create(file_priv,
+			&fb_cma->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+	.destroy	= drm_fb_cma_destroy,
+	.create_handle	= drm_fb_cma_create_handle,
+};
+
+static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
+	struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+	unsigned int num_planes)
+{
+	struct drm_fb_cma *fb_cma;
+	int ret;
+	int i;
+
+	fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
+	if (!fb_cma)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+	if (ret) {
+		dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret);
+		kfree(fb_cma);
+		return ERR_PTR(ret);
+	}
+
+	drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+	for (i = 0; i < num_planes; i++)
+		fb_cma->obj[i] = obj[i];
+
+	return fb_cma;
+}
+
+/**
+ * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+	struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_fb_cma *fb_cma;
+	struct drm_gem_cma_object *objs[4];
+	struct drm_gem_object *obj;
+	unsigned int hsub;
+	unsigned int vsub;
+	int ret;
+	int i;
+
+	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+		unsigned int width = mode_cmd->width / (i ? hsub : 1);
+		unsigned int height = mode_cmd->height / (i ? vsub : 1);
+		unsigned int min_size;
+
+		obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+		if (!obj) {
+			dev_err(dev->dev, "Failed to lookup GEM object\n");
+			ret = -ENXIO;
+			goto err_gem_object_unreference;
+		}
+
+		min_size = (height - 1) * mode_cmd->pitches[i]
+			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+			 + mode_cmd->offsets[i];
+
+		if (obj->size < min_size) {
+			drm_gem_object_unreference_unlocked(obj);
+			ret = -EINVAL;
+			goto err_gem_object_unreference;
+		}
+		objs[i] = to_drm_gem_cma_obj(obj);
+	}
+
+	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+	if (IS_ERR(fb_cma)) {
+		ret = PTR_ERR(fb_cma);
+		goto err_gem_object_unreference;
+	}
+
+	return &fb_cma->fb;
+
+err_gem_object_unreference:
+	for (i--; i >= 0; i--)
+		drm_gem_object_unreference_unlocked(&objs[i]->base);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create);
+
+/**
+ * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Return the CMA GEM object for the given framebuffer.
+ *
+ * This function will usually be called from the CRTC callback functions.
+ */
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+	unsigned int plane)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+	if (plane >= 4)
+		return NULL;
+
+	return fb_cma->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+
+static struct fb_ops drm_fbdev_cma_ops = {
+	.owner		= THIS_MODULE,
+	.fb_fillrect	= sys_fillrect,
+	.fb_copyarea	= sys_copyarea,
+	.fb_imageblit	= sys_imageblit,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+};
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	struct drm_device *dev = helper->dev;
+	struct drm_gem_cma_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned int bytes_per_pixel;
+	unsigned long offset;
+	struct fb_info *fbi;
+	size_t size;
+	int ret;
+
+	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
+			sizes->surface_width, sizes->surface_height,
+			sizes->surface_bpp);
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+		sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	obj = drm_gem_cma_create(dev, size);
+	if (!obj)
+		return -ENOMEM;
+
+	fbi = framebuffer_alloc(0, dev->dev);
+	if (!fbi) {
+		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+		ret = -ENOMEM;
+		goto err_drm_gem_cma_free_object;
+	}
+
+	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+	if (IS_ERR(fbdev_cma->fb)) {
+		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+		ret = PTR_ERR(fbdev_cma->fb);
+		goto err_framebuffer_release;
+	}
+
+	fb = &fbdev_cma->fb->fb;
+	helper->fb = fb;
+	helper->fbdev = fbi;
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &drm_fbdev_cma_ops;
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		dev_err(dev->dev, "Failed to allocate color map.\n");
+		goto err_drm_fb_cma_destroy;
+	}
+
+	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * fb->pitches[0];
+
+	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
+	fbi->screen_base = obj->vaddr + offset;
+	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+	fbi->screen_size = size;
+	fbi->fix.smem_len = size;
+
+	return 0;
+
+err_drm_fb_cma_destroy:
+	drm_fb_cma_destroy(fb);
+err_framebuffer_release:
+	framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+	drm_gem_cma_free_object(&obj->base);
+	return ret;
+}
+
+static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	int ret = 0;
+
+	if (!helper->fb) {
+		ret = drm_fbdev_cma_create(helper, sizes);
+		if (ret < 0)
+			return ret;
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+	.fb_probe = drm_fbdev_cma_probe,
+};
+
+/**
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count)
+{
+	struct drm_fbdev_cma *fbdev_cma;
+	struct drm_fb_helper *helper;
+	int ret;
+
+	fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+	if (!fbdev_cma) {
+		dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
+	helper = &fbdev_cma->fb_helper;
+
+	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+		goto err_free;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(helper);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to add connectors.\n");
+		goto err_drm_fb_helper_fini;
+
+	}
+
+	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to set inital hw configuration.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	return fbdev_cma;
+
+err_drm_fb_helper_fini:
+	drm_fb_helper_fini(helper);
+err_free:
+	kfree(fbdev_cma);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
+
+/**
+ * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
+ * @fbdev_cma: The drm_fbdev_cma struct
+ */
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma->fb_helper.fbdev) {
+		struct fb_info *info;
+		int ret;
+
+		info = fbdev_cma->fb_helper.fbdev;
+		ret = unregister_framebuffer(info);
+		if (ret < 0)
+			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+
+		framebuffer_release(info);
+	}
+
+	if (fbdev_cma->fb)
+		drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+
+	drm_fb_helper_fini(&fbdev_cma->fb_helper);
+	kfree(fbdev_cma);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
+
+/**
+ * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's lastclose callback.
+ */
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma)
+		drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
+
+/**
+ * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's output_poll_changed
+ * callback.
+ */
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma)
+		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
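
A minimal sketch of the driver glue these helpers expect; the my_* names are placeholders and only the drm_fbdev_cma_* calls come from the file above:

static struct drm_fbdev_cma *fbdev;

static void my_lastclose(struct drm_device *dev)
{
	drm_fbdev_cma_restore_mode(fbdev);	/* back to the fbdev mode */
}

static void my_output_poll_changed(struct drm_device *dev)
{
	drm_fbdev_cma_hotplug_event(fbdev);	/* forward hotplug to fbdev */
}

static int my_load(struct drm_device *dev)
{
	fbdev = drm_fbdev_cma_init(dev, 32, 1, 1);	/* bpp, crtcs, connectors */
	return IS_ERR(fbdev) ? PTR_ERR(fbdev) : 0;
}

static void my_unload(struct drm_device *dev)
{
	drm_fbdev_cma_fini(fbdev);
}
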
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dde5c345e75f..4d58d7e6af3f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -236,7 +236,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
 
-bool drm_fb_helper_force_kernel_mode(void)
+static bool drm_fb_helper_force_kernel_mode(void)
 {
 	bool ret, error = false;
 	struct drm_fb_helper *helper;
@@ -330,7 +330,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
 		/* Walk the connectors & encoders on this fb turning them on/off */
 		for (j = 0; j < fb_helper->connector_count; j++) {
 			connector = fb_helper->connector_info[j]->connector;
-			drm_helper_connector_dpms(connector, dpms_mode);
+			connector->funcs->dpms(connector, dpms_mode);
 			drm_connector_property_set_value(connector,
 				dev->mode_config.dpms_property, dpms_mode);
 		}
@@ -1230,7 +1230,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_fb_helper_crtc **crtcs;
 	struct drm_display_mode **modes;
-	struct drm_encoder *encoder;
 	struct drm_mode_set *modeset;
 	bool *enabled;
 	int width, height;
@@ -1241,11 +1240,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 	width = dev->mode_config.max_width;
 	height = dev->mode_config.max_height;
 
-	/* clean out all the encoder/crtc combos */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		encoder->crtc = NULL;
-	}
-
 	crtcs = kcalloc(dev->mode_config.num_connector,
 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
 	modes = kcalloc(dev->mode_config.num_connector,
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 000000000000..1aa8fee1e865
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,251 @@
+/*
+ * drm gem CMA (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+static void drm_gem_cma_buf_destroy(struct drm_device *drm,
+		struct drm_gem_cma_object *cma_obj)
+{
+	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
+			cma_obj->paddr);
+}
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	size = round_up(size, PAGE_SIZE);
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return ERR_PTR(-ENOMEM);
+
+	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+	if (!cma_obj->vaddr) {
+		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+
+	gem_obj = &cma_obj->base;
+
+	ret = drm_gem_object_init(drm, gem_obj, size);
+	if (ret)
+		goto err_obj_init;
+
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret)
+		goto err_create_mmap_offset;
+
+	return cma_obj;
+
+err_create_mmap_offset:
+	drm_gem_object_release(gem_obj);
+
+err_obj_init:
+	drm_gem_cma_buf_destroy(drm, cma_obj);
+
+err_dma_alloc:
+	kfree(cma_obj);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+
+/*
+ * drm_gem_cma_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
+		struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int size,
+		unsigned int *handle)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	cma_obj = drm_gem_cma_create(drm, size);
+	if (IS_ERR(cma_obj))
+		return cma_obj;
+
+	gem_obj = &cma_obj->base;
+
+	/*
+	 * Allocate an id in the idr table where the object is registered;
+	 * the handle holds the id that userspace sees.
+	 */
+	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+	if (ret)
+		goto err_handle_create;
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return cma_obj;
+
+err_handle_create:
+	drm_gem_cma_free_object(gem_obj);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_gem_cma_object *cma_obj;
+
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+
+	drm_gem_object_release(gem_obj);
+
+	cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+
+	kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/*
+ * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * it in your own function if you need stricter alignment.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+		struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_cma_object *cma_obj;
+	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+	if (args->pitch < min_pitch)
+		args->pitch = min_pitch;
+
+	if (args->size < args->pitch * args->height)
+		args->size = args->pitch * args->height;
+
+	cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
+			args->size, &args->handle);
+	if (IS_ERR(cma_obj))
+		return PTR_ERR(cma_obj);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
+
+/*
+ * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
+ * function
+ */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+		struct drm_device *drm, uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_object *gem_obj;
+
+	mutex_lock(&drm->struct_mutex);
+
+	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+	if (!gem_obj) {
+		dev_err(drm->dev, "failed to lookup gem object\n");
+		mutex_unlock(&drm->struct_mutex);
+		return -EINVAL;
+	}
+
+	*offset = get_gem_mmap_offset(gem_obj);
+
+	drm_gem_object_unreference(gem_obj);
+
+	mutex_unlock(&drm->struct_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
+
+const struct vm_operations_struct drm_gem_cma_vm_ops = {
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+
+/*
+ * drm_gem_cma_mmap - (struct file_operations)->mmap callback function
+ */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_gem_object *gem_obj;
+	struct drm_gem_cma_object *cma_obj;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	gem_obj = vma->vm_private_data;
+	cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+
+/*
+ * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
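
For completeness, a sketch of how a driver would plug the CMA GEM helpers into the standard hooks; the driver and fops names are placeholders, not taken from this series:

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.mmap		= drm_gem_cma_mmap,	/* remaps the CMA buffer */
};

static struct drm_driver my_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.gem_free_object	= drm_gem_cma_free_object,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
	.dumb_create		= drm_gem_cma_dumb_create,
	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
	.dumb_destroy		= drm_gem_cma_dumb_destroy,
	.fops			= &my_fops,
};
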
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 09975ba1a8f7..3a3d0ce891b9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1236,7 +1236,7 @@ done:
 	return ret;
 }
 
-void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 {
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 85a8fa6e09fe..23a824e6a22a 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -62,7 +62,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
+#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
 	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
@@ -619,20 +619,11 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		offset = drm_core_get_reg_ofs(dev);
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 		vma->vm_page_prot = drm_io_prot(map->type, vma);
-#if !defined(__arm__)
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
 				       vma->vm_page_prot))
 			return -EAGAIN;
-#else
-		if (remap_pfn_range(vma, vma->vm_start,
-					(map->offset + offset) >> PAGE_SHIFT,
-					vma->vm_end - vma->vm_start,
-					vma->vm_page_prot))
-			return -EAGAIN;
-#endif
-
 		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
 			  " offset = 0x%llx\n",
 			  map->type,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index ad01d3a09c11..c2b1b1441ed0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -147,9 +147,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 
 		drm_mode_connector_update_edid_property(connector, edid);
 		count = drm_add_edid_modes(connector, edid);
-
-		kfree(connector->display_info.raw_edid);
-		connector->display_info.raw_edid = edid;
+		kfree(edid);
 	} else {
 		struct drm_display_mode *mode = drm_mode_create(connector->dev);
 		struct exynos_drm_panel_info *panel;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index be879c079346..bd4ff6348239 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -266,8 +266,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
 	/* release drm framebuffer and real buffer */
 	if (fb_helper->fb && fb_helper->fb->funcs) {
 		fb = fb_helper->fb;
-		if (fb && fb->funcs->destroy)
-			fb->funcs->destroy(fb);
+		if (fb)
+			drm_framebuffer_remove(fb);
 	}
 
 	/* release linux framebuffer */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3e933c911017..8fe431ae537b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -102,7 +102,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
 				u8 *edid, int len)
 {
 	struct vidi_context *ctx = get_vidi_context(dev);
-	struct edid *raw_edid;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -115,18 +114,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
 		return -EFAULT;
 	}
 
-	raw_edid = kzalloc(len, GFP_KERNEL);
-	if (!raw_edid) {
-		DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
-		return -ENOMEM;
-	}
-
-	memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
-						* EDID_LENGTH, len));
-
-	/* attach the edid data to connector. */
-	connector->display_info.raw_edid = (char *)raw_edid;
-
 	memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
 					* EDID_LENGTH, len));
 
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index abfa2a93f0d0..7a2d40a5c1e1 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -3,7 +3,7 @@
 #
 ccflags-y += -I$(srctree)/include/drm
 
-gma500_gfx-y += gem_glue.o \
+gma500_gfx-y += \
 	  accel_2d.o \
 	  backlight.o \
 	  framebuffer.o \
@@ -30,7 +30,8 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \
 	  cdv_intel_crt.o \
 	  cdv_intel_display.o \
 	  cdv_intel_hdmi.o \
-	  cdv_intel_lvds.o
+	  cdv_intel_lvds.o \
+	  cdv_intel_dp.o
 
 gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
 	  oaktrail_crtc.o \
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 20793951fcac..143eba3309c5 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -26,10 +26,55 @@
 #include "intel_bios.h"
 #include "power.h"
 
+static void do_gma_backlight_set(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	backlight_update_status(dev_priv->backlight_device);
+#endif	
+}
+
+void gma_backlight_enable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = true;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
+		do_gma_backlight_set(dev);
+	}
+#endif	
+}
+
+void gma_backlight_disable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = false;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = 0;
+		do_gma_backlight_set(dev);
+	}
+#endif	
+}
+
+void gma_backlight_set(struct drm_device *dev, int v)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_level = v;
+	if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
+		dev_priv->backlight_device->props.brightness = v;
+		do_gma_backlight_set(dev);
+	}
+#endif	
+}
+
 int gma_backlight_init(struct drm_device *dev)
 {
 #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = true;
 	return dev_priv->ops->backlight_init(dev);
 #else
 	return 0;
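
A short illustration of how an output driver is expected to use the new helpers; the encoder dpms function is a placeholder, only the gma_backlight_* calls come from this file:

static void my_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;

	if (mode == DRM_MODE_DPMS_ON)
		gma_backlight_enable(dev);	/* restores the cached level */
	else
		gma_backlight_disable(dev);	/* drives brightness to zero */
}

/* Elsewhere (e.g. an opregion or sysfs request) the level itself changes: */
/* gma_backlight_set(dev, level); */
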
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 7db0e3bf5a5b..1ceca3d13b65 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -58,10 +58,17 @@ static int cdv_output_init(struct drm_device *dev)
 	cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
 
 	/* These bits indicate HDMI not SDVO on CDV */
-	if (REG_READ(SDVOB) & SDVO_DETECTED)
+	if (REG_READ(SDVOB) & SDVO_DETECTED) {
 		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
-	if (REG_READ(SDVOC) & SDVO_DETECTED)
+		if (REG_READ(DP_B) & DP_DETECTED)
+			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
+	}
+
+	if (REG_READ(SDVOC) & SDVO_DETECTED) {
 		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+		if (REG_READ(DP_C) & DP_DETECTED)
+			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
+	}
 	return 0;
 }
 
@@ -163,6 +170,7 @@ static int cdv_backlight_init(struct drm_device *dev)
 			cdv_get_brightness(cdv_backlight_device);
 	backlight_update_status(cdv_backlight_device);
 	dev_priv->backlight_device = cdv_backlight_device;
+	dev_priv->backlight_enabled = true;
 	return 0;
 }
 
@@ -449,6 +457,7 @@ static void cdv_get_core_freq(struct drm_device *dev)
 	case 6:
 	case 7:
 		dev_priv->core_freq = 266;
+		break;
 	default:
 		dev_priv->core_freq = 0;
 	}
@@ -488,6 +497,65 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
 	}	
 }
 
+static const char *force_audio_names[] = {
+	"off",
+	"auto",
+	"on",
+};
+
+void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->force_audio_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "audio",
+					   ARRAY_SIZE(force_audio_names));
+		if (prop == NULL)
+			return;
+
+		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+			drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+		dev_priv->force_audio_property = prop;
+	}
+	drm_connector_attach_property(connector, prop, 0);
+}
+
+
+static const char *broadcast_rgb_names[] = {
+	"Full",
+	"Limited 16:235",
+};
+
+void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->broadcast_rgb_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "Broadcast RGB",
+					   ARRAY_SIZE(broadcast_rgb_names));
+		if (prop == NULL)
+			return;
+
+		for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+			drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+		dev_priv->broadcast_rgb_property = prop;
+	}
+
+	drm_connector_attach_property(connector, prop, 0);
+}
+
 /* Cedarview */
 static const struct psb_offset cdv_regmap[2] = {
 	{
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index a68509ba22a8..3cfd0931fbfb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -57,15 +57,26 @@ struct cdv_intel_clock_t {
 struct cdv_intel_limit_t {
 	struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
 	struct cdv_intel_p2_t p2;
+	bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
+			int, int, struct cdv_intel_clock_t *);
 };
 
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+	struct drm_crtc *crtc, int target, int refclk,
+	struct cdv_intel_clock_t *best_clock);
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+				int refclk,
+				struct cdv_intel_clock_t *best_clock);
+
 #define CDV_LIMIT_SINGLE_LVDS_96	0
 #define CDV_LIMIT_SINGLE_LVDS_100	1
 #define CDV_LIMIT_DAC_HDMI_27		2
 #define CDV_LIMIT_DAC_HDMI_96		3
+#define CDV_LIMIT_DP_27			4
+#define CDV_LIMIT_DP_100		5
 
 static const struct cdv_intel_limit_t cdv_intel_limits[] = {
-	{			/* CDV_SIGNLE_LVDS_96MHz */
+	{			/* CDV_SINGLE_LVDS_96MHz */
 	 .dot = {.min = 20000, .max = 115500},
 	 .vco = {.min = 1800000, .max = 3600000},
 	 .n = {.min = 2, .max = 6},
@@ -76,6 +87,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
 	 .p1 = {.min = 2, .max = 10},
 	 .p2 = {.dot_limit = 200000,
 		.p2_slow = 14, .p2_fast = 14},
+		.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_SINGLE_LVDS_100MHz */
 	 .dot = {.min = 20000, .max = 115500},
@@ -90,6 +102,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
 	  * is 80-224Mhz.  Prefer single channel as much as possible.
 	  */
 	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+	.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_DAC_HDMI_27MHz */
 	 .dot = {.min = 20000, .max = 400000},
@@ -101,6 +114,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
 	 .p = {.min = 5, .max = 90},
 	 .p1 = {.min = 1, .max = 9},
 	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+	.find_pll = cdv_intel_find_best_PLL,
 	 },
 	{			/* CDV_DAC_HDMI_96MHz */
 	 .dot = {.min = 20000, .max = 400000},
@@ -112,7 +126,32 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
 	 .p = {.min = 5, .max = 100},
 	 .p1 = {.min = 1, .max = 10},
 	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+	.find_pll = cdv_intel_find_best_PLL,
+	 },
+	{			/* CDV_DP_27MHz */
+	 .dot = {.min = 160000, .max = 272000},
+	 .vco = {.min = 1809000, .max = 3564000},
+	 .n = {.min = 1, .max = 1},
+	 .m = {.min = 67, .max = 132},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 65, .max = 130},
+	 .p = {.min = 5, .max = 90},
+	 .p1 = {.min = 1, .max = 9},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = cdv_intel_find_dp_pll,
 	 },
+	{			/* CDV_DP_100MHz */
+	 .dot = {.min = 160000, .max = 272000},
+	 .vco = {.min = 1800000, .max = 3600000},
+	 .n = {.min = 2, .max = 6},
+	 .m = {.min = 60, .max = 164},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 58, .max = 162},
+	 .p = {.min = 5, .max = 100},
+	 .p1 = {.min = 1, .max = 10},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = cdv_intel_find_dp_pll,
+	 }	
 };
 
 #define _wait_for(COND, MS, W) ({ \
@@ -132,7 +171,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 
 
-static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
 {
 	int ret;
 
@@ -159,7 +198,7 @@ static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
 	return 0;
 }
 
-static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
 {
 	int ret;
 	static bool dpio_debug = true;
@@ -201,7 +240,7 @@ static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
 /* Reset the DPIO configuration register.  The BIOS does this at every
  * mode set.
  */
-static void cdv_sb_reset(struct drm_device *dev)
+void cdv_sb_reset(struct drm_device *dev)
 {
 
 	REG_WRITE(DPIO_CFG, 0);
@@ -216,7 +255,7 @@ static void cdv_sb_reset(struct drm_device *dev)
  */
 static int
 cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
-			       struct cdv_intel_clock_t *clock, bool is_lvds)
+			       struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
 {
 	struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_crtc->pipe;
@@ -259,7 +298,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
 	ref_value &= ~(REF_CLK_MASK);
 
 	/* use DPLL_A for pipeB on CRT/HDMI */
-	if (pipe == 1 && !is_lvds) {
+	if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
 		DRM_DEBUG_KMS("use DPLLA for pipe B\n");
 		ref_value |= REF_CLK_DPLLA;
 	} else {
@@ -336,30 +375,33 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
 	if (ret)
 		return ret;
 
-	lane_reg = PSB_LANE0;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
-
-	lane_reg = PSB_LANE1;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
-
-	lane_reg = PSB_LANE2;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
-
-	lane_reg = PSB_LANE3;
-	cdv_sb_read(dev, lane_reg, &lane_value);
-	lane_value &= ~(LANE_PLL_MASK);
-	lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
-	cdv_sb_write(dev, lane_reg, lane_value);
-
+	if (ddi_select) {
+		if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
+			lane_reg = PSB_LANE0;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+
+			lane_reg = PSB_LANE1;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+		} else {
+			lane_reg = PSB_LANE2;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+
+			lane_reg = PSB_LANE3;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+		}
+	}
 	return 0;
 }
 
@@ -396,6 +438,12 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
 			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
 		else
 			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+			psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		if (refclk == 27000)
+			limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
+		else
+			limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
 	} else {
 		if (refclk == 27000)
 			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
@@ -438,13 +486,12 @@ static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
 	return true;
 }
 
-static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
-				int refclk,
-				struct cdv_intel_clock_t *best_clock)
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+	struct drm_crtc *crtc, int target, int refclk,
+	struct cdv_intel_clock_t *best_clock)
 {
 	struct drm_device *dev = crtc->dev;
 	struct cdv_intel_clock_t clock;
-	const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
 	int err = target;
 
 
@@ -498,6 +545,49 @@ static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
 	return err != target;
 }
 
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+				int refclk,
+				struct cdv_intel_clock_t *best_clock)
+{
+	struct cdv_intel_clock_t clock;
+	if (refclk == 27000) {
+		if (target < 200000) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 1;
+			clock.m1 = 0;
+			clock.m2 = 118;
+		} else {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 1;
+			clock.m1 = 0;
+			clock.m2 = 98;
+		}
+	} else if (refclk == 100000) {
+		if (target < 200000) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 5;
+			clock.m1 = 0;
+			clock.m2 = 160;
+		} else {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 5;
+			clock.m1 = 0;
+			clock.m2 = 133;
+		}
+	} else
+		return false;
+	clock.m = clock.m2 + 2;
+	clock.p = clock.p1 * clock.p2;
+	clock.vco = (refclk * clock.m) / clock.n;
+	clock.dot = clock.vco / clock.p;
+	memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
+	return true;
+}
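+
+/*
+ * Quick check of the fixed table above: with refclk = 27000 the low entry
+ * gives m = 118 + 2 = 120, p = 2 * 10 = 20, vco = 27000 * 120 / 1 = 3240000
+ * and dot = 162000 (the 1.62GHz link rate), while the high entry gives
+ * vco = 2700000 and dot = 270000 for the 2.7GHz rate.
+ */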
+
 static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
 			    int x, int y, struct drm_framebuffer *old_fb)
 {
@@ -791,7 +881,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 		if (psb_intel_crtc->active)
-			return;
+			break;
 
 		psb_intel_crtc->active = true;
 
@@ -835,17 +925,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 		REG_WRITE(map->status, temp);
 		REG_READ(map->status);
 
-		cdv_intel_update_watermark(dev, crtc);
 		cdv_intel_crtc_load_lut(crtc);
 
 		/* Give the overlay scaler a chance to enable
 		 * if it's on this pipe */
 		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
-		psb_intel_crtc->crtc_enable = true;
 		break;
 	case DRM_MODE_DPMS_OFF:
 		if (!psb_intel_crtc->active)
-			return;
+			break;
 
 		psb_intel_crtc->active = false;
 
@@ -892,10 +980,9 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 		/* Wait for the clocks to turn off. */
 		udelay(150);
-		cdv_intel_update_watermark(dev, crtc);
-		psb_intel_crtc->crtc_enable = false;
 		break;
 	}
+	cdv_intel_update_watermark(dev, crtc);
 	/*Set FIFO Watermarks*/
 	REG_WRITE(DSPARB, 0x3F3E);
 }
@@ -952,9 +1039,12 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	u32 dpll = 0, dspcntr, pipeconf;
 	bool ok;
 	bool is_crt = false, is_lvds = false, is_tv = false;
-	bool is_hdmi = false;
+	bool is_hdmi = false, is_dp = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	const struct cdv_intel_limit_t *limit;
+	u32 ddi_select = 0;
+	bool is_edp = false;
 
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		struct psb_intel_encoder *psb_intel_encoder =
@@ -964,6 +1054,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 		    || connector->encoder->crtc != crtc)
 			continue;
 
+		ddi_select = psb_intel_encoder->ddi_select;
 		switch (psb_intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -977,6 +1068,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 		case INTEL_OUTPUT_HDMI:
 			is_hdmi = true;
 			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_edp = true;
+			break;
+		default:
+			DRM_ERROR("invalid output type.\n");
+			return 0;
 		}
 	}
 
@@ -986,6 +1086,20 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		/* high-end sku, 27/100 mhz */
 		refclk = 27000;
+	if (is_dp || is_edp) {
+		/*
+		/*
+		 * Per the spec the low-end SKU has only CRT/LVDS, so it does
+		 * not need to be considered for DP/eDP.
+		 * The high-end SKU uses the 27/100MHz reference clock for
+		 * DP/eDP: 100MHz when the SSC clock is used, 27MHz otherwise.
+		 * From the VBIOS code it appears that pipe A chooses 27MHz
+		 * for DP/eDP while pipe B chooses 100MHz.
+		 */
+		if (pipe == 0)
+			refclk = 27000;
+		else
+			refclk = 100000;
+	}
 
 	if (is_lvds && dev_priv->lvds_use_ssc) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
@@ -993,8 +1107,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	drm_mode_debug_printmodeline(adjusted_mode);
+
+	limit = cdv_intel_limit(crtc, refclk);
 
-	ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
 				 &clock);
 	if (!ok) {
 		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
@@ -1009,6 +1125,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	}
 /*		dpll |= PLL_REF_INPUT_DREFCLK; */
 
+	if (is_dp || is_edp) {
+		cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
+	} else {
+		REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
+		REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
+		REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
+		REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
+	}
+
 	dpll |= DPLL_SYNCLOCK_ENABLE;
 /*	if (is_lvds)
 		dpll |= DPLLB_MODE_LVDS;
@@ -1019,6 +1144,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	/* setup pipeconf */
 	pipeconf = REG_READ(map->conf);
 
+	pipeconf &= ~(PIPE_BPC_MASK);
+	if (is_edp) {
+		switch (dev_priv->edp.bpp) {
+		case 24:
+			pipeconf |= PIPE_8BPC;
+			break;
+		case 18:
+			pipeconf |= PIPE_6BPC;
+			break;
+		case 30:
+			pipeconf |= PIPE_10BPC;
+			break;
+		default:
+			pipeconf |= PIPE_8BPC;
+			break;
+		}
+	} else if (is_lvds) {
+		/* The BPC will be 6 if it is an 18-bit LVDS panel */
+		if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+			pipeconf |= PIPE_8BPC;
+		else
+			pipeconf |= PIPE_6BPC;
+	} else
+		pipeconf |= PIPE_8BPC;
+
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -1033,7 +1183,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
 	REG_READ(map->dpll);
 
-	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
+	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
 
 	udelay(150);
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
new file mode 100644
index 000000000000..e3a3978cf320
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -0,0 +1,1950 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include <drm/drm_dp_helper.h>
+
+#define _wait_for(COND, MS, W) ({ \
+        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
+        int ret__ = 0;                                                  \
+        while (! (COND)) {                                              \
+                if (time_after(jiffies, timeout__)) {                   \
+                        ret__ = -ETIMEDOUT;                             \
+                        break;                                          \
+                }                                                       \
+                if (W && !in_dbg_master()) msleep(W);                   \
+        }                                                               \
+        ret__;                                                          \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
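+
+/*
+ * Usage sketch for the helpers above: wait_for() polls a condition with a
+ * 1ms sleep between checks and gives up after MS milliseconds, returning 0
+ * on success and -ETIMEDOUT on timeout, e.g.
+ *	if (wait_for((REG_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ *		DRM_DEBUG_KMS("panel did not turn off\n");
+ */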
+
+#define DP_LINK_STATUS_SIZE	6
+#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+#define CDV_FAST_LINK_TRAIN	1
+
+struct cdv_intel_dp {
+	uint32_t output_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	int force_audio;
+	uint32_t color_range;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[4];
+	struct psb_intel_encoder *encoder;
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	uint8_t	train_set[4];
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+	bool panel_on;
+};
+
+struct ddi_regoff {
+	uint32_t	PreEmph1;
+	uint32_t	PreEmph2;
+	uint32_t	VSwing1;
+	uint32_t	VSwing2;
+	uint32_t	VSwing3;
+	uint32_t	VSwing4;
+	uint32_t	VSwing5;
+};
+
+static struct ddi_regoff ddi_DP_train_table[] = {
+	{.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
+	.VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
+	.VSwing5 = 0x8158,},
+	{.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
+	.VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
+	.VSwing5 = 0x8258,},
+};
+
+static uint32_t dp_vswing_premph_table[] = {
+        0x55338954,	0x4000,
+        0x554d8954,	0x2000,
+        0x55668954,	0,
+        0x559ac0d4,	0x6000,
+};
+
+/**
+ * is_edp - is the given encoder attached to an eDP panel
+ * @encoder: the psb_intel_encoder to check
+ *
+ * Returns true if the encoder is wired up to an eDP panel and false
+ * otherwise.
+ */
+static bool is_edp(struct psb_intel_encoder *encoder)
+{
+	return encoder->type == INTEL_OUTPUT_EDP;
+}
+
+
+static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+
+static int
+cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_lane_count = 4;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+		switch (max_lane_count) {
+		case 1: case 2: case 4:
+			break;
+		default:
+			max_lane_count = 4;
+		}
+	}
+	return max_lane_count;
+}
+
+static int
+cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+		break;
+	default:
+		max_link_bw = DP_LINK_BW_1_62;
+		break;
+	}
+	return max_link_bw;
+}
+
+static int
+cdv_intel_dp_link_clock(uint8_t link_bw)
+{
+	if (link_bw == DP_LINK_BW_2_7)
+		return 270000;
+	else
+		return 162000;
+}
+
+static int
+cdv_intel_dp_link_required(int pixel_clock, int bpp)
+{
+	return (pixel_clock * bpp + 7) / 8;
+}
+
+static int
+cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+	return (max_link_clock * max_lanes * 19) / 20;
+}
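+
+/*
+ * Illustrative numbers for the three helpers above: a 1920x1080@60 mode
+ * (148500 kHz pixel clock) at 24bpp needs
+ * cdv_intel_dp_link_required(148500, 24) = 445500, while a 2.7GHz link
+ * with 4 lanes offers cdv_intel_dp_max_data_rate(270000, 4) = 1026000 in
+ * the same units, so the mode fits; a single 1.62GHz lane offers only
+ * 153900 and such a mode would be rejected as MODE_CLOCK_HIGH.
+ */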
+
+static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp;
+
+	if (intel_dp->panel_on) {
+		DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
+		return;
+	}
+	DRM_DEBUG_KMS("\n");
+
+	pp = REG_READ(PP_CONTROL);
+
+	pp |= EDP_FORCE_VDD;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+	msleep(intel_dp->panel_power_up_delay);
+}
+
+static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	pp = REG_READ(PP_CONTROL);
+
+	pp &= ~EDP_FORCE_VDD;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+
+}
+
+/* Returns true if the panel was already on when called */
+static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
+
+	if (intel_dp->panel_on)
+		return true;
+
+	DRM_DEBUG_KMS("\n");
+	pp = REG_READ(PP_CONTROL);
+	pp &= ~PANEL_UNLOCK_MASK;
+
+	pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+
+	if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) {
+		DRM_DEBUG_KMS("Error powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
+		intel_dp->panel_on = false;
+	} else
+		intel_dp->panel_on = true;
+	msleep(intel_dp->panel_power_up_delay);
+
+	return false;
+}
+
+static void cdv_intel_edp_panel_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp, idle_off_mask = PP_ON;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+
+	DRM_DEBUG_KMS("\n");
+
+	pp = REG_READ(PP_CONTROL);
+
+	if ((pp & POWER_TARGET_ON) == 0) 
+		return;
+
+	intel_dp->panel_on = false;
+	pp &= ~PANEL_UNLOCK_MASK;
+	/* ILK workaround: disable reset around power sequence */
+
+	pp &= ~POWER_TARGET_ON;
+	pp &= ~EDP_FORCE_VDD;
+	pp &= ~EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+	DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
+
+	if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000))
+		DRM_DEBUG_KMS("Error turning off the eDP panel\n");
+
+	msleep(intel_dp->panel_power_cycle_delay);
+	DRM_DEBUG_KMS("Over\n");
+}
+
+static void cdv_intel_edp_backlight_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	/*
+	 * If we enable the backlight right away following a panel power
+	 * on, we may see slight flicker as the panel syncs with the eDP
+	 * link.  So delay a bit to make sure the image is solid before
+	 * allowing it to appear.
+	 */
+	msleep(300);
+	pp = REG_READ(PP_CONTROL);
+
+	pp |= EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	gma_backlight_enable(dev);
+}
+
+static void cdv_intel_edp_backlight_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	gma_backlight_disable(dev);
+	msleep(10);
+	pp = REG_READ(PP_CONTROL);
+
+	pp &= ~EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	msleep(intel_dp->backlight_off_delay);
+}
+
+static int
+cdv_intel_dp_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
+	int max_lanes = cdv_intel_dp_max_lane_count(encoder);
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+	if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
+		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	/* Only refuse the mode on non-eDP outputs, since we have seen some
+	   weird eDP panels which are outside spec tolerances but somehow
+	   work by magic */
+	if (!is_edp(encoder) &&
+	    (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
+	     > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
+		return MODE_CLOCK_HIGH;
+
+	if (is_edp(encoder)) {
+		if (cdv_intel_dp_link_required(mode->clock, 24)
+		    > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
+			return MODE_CLOCK_HIGH;
+	}
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	return MODE_OK;
+}
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+	int	i;
+	uint32_t v = 0;
+
+	if (src_bytes > 4)
+		src_bytes = 4;
+	for (i = 0; i < src_bytes; i++)
+		v |= ((uint32_t) src[i]) << ((3-i) * 8);
+	return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+	int i;
+	if (dst_bytes > 4)
+		dst_bytes = 4;
+	for (i = 0; i < dst_bytes; i++)
+		dst[i] = src >> ((3-i) * 8);
+}
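+
+/*
+ * Example: pack_aux((uint8_t []){ 0x11, 0x22, 0x33 }, 3) yields 0x11223300,
+ * i.e. the first byte lands in the most significant byte of the register,
+ * matching the big-endian layout of the AUX data registers; unpack_aux()
+ * performs the inverse conversion on received data.
+ */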
+
+static int
+cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+		uint8_t *send, int send_bytes,
+		uint8_t *recv, int recv_size)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint32_t output_reg = intel_dp->output_reg;
+	struct drm_device *dev = encoder->base.dev;
+	uint32_t ch_ctl = output_reg + 0x10;
+	uint32_t ch_data = ch_ctl + 4;
+	int i;
+	int recv_bytes;
+	uint32_t status;
+	uint32_t aux_clock_divider;
+	int try, precharge;
+
+	/*
+	 * The clock divider is based off the hrawclk, and the AUX channel
+	 * wants to run at 2MHz, so take the hrawclk value (in MHz) and
+	 * divide it by 2. The CDV platform uses a 200MHz hrawclk.
+	 */
+	aux_clock_divider = 200 / 2;
+
+	precharge = 4;
+	if (is_edp(encoder))
+		precharge = 10;
+
+	if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+		DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+			  REG_READ(ch_ctl));
+		return -EBUSY;
+	}
+
+	/* Must try at least 3 times according to DP spec */
+	for (try = 0; try < 5; try++) {
+		/* Load the send data into the aux channel data registers */
+		for (i = 0; i < send_bytes; i += 4)
+			REG_WRITE(ch_data + i,
+				   pack_aux(send + i, send_bytes - i));
+	
+		/* Send the command and wait for it to complete */
+		REG_WRITE(ch_ctl,
+			   DP_AUX_CH_CTL_SEND_BUSY |
+			   DP_AUX_CH_CTL_TIME_OUT_400us |
+			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+		for (;;) {
+			status = REG_READ(ch_ctl);
+			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+				break;
+			udelay(100);
+		}
+	
+		/* Clear done status and any errors */
+		REG_WRITE(ch_ctl,
+			   status |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+		if (status & DP_AUX_CH_CTL_DONE)
+			break;
+	}
+
+	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+		return -EBUSY;
+	}
+
+	/* Check for timeout or receive error.
+	 * Timeouts occur when the sink is not connected
+	 */
+	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+		return -EIO;
+	}
+
+	/* Timeouts occur when the device isn't connected, so they're
+	 * "normal" -- don't fill the kernel log with these */
+	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+		return -ETIMEDOUT;
+	}
+
+	/* Unload any bytes sent back from the other side */
+	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+	
+	for (i = 0; i < recv_bytes; i += 4)
+		unpack_aux(REG_READ(ch_data + i),
+			   recv + i, recv_bytes - i);
+
+	return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+			  uint16_t address, uint8_t *send, int send_bytes)
+{
+	int ret;
+	uint8_t	msg[20];
+	int msg_bytes;
+	uint8_t	ack;
+
+	if (send_bytes > 16)
+		return -1;
+	msg[0] = AUX_NATIVE_WRITE << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = send_bytes - 1;
+	memcpy(&msg[4], send, send_bytes);
+	msg_bytes = send_bytes + 4;
+	for (;;) {
+		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
+		if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			break;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+	return send_bytes;
+}
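+
+/*
+ * Example of the native AUX syntax built above: a one byte write of
+ * DP_SET_POWER_D0 (0x01) to DPCD address DP_SET_POWER (0x600) produces
+ * msg = { 0x80, 0x06, 0x00, 0x00, 0x01 }, i.e. command, address high,
+ * address low, length - 1, payload.
+ */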
+
+/* Write a single byte to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+			    uint16_t address, uint8_t byte)
+{
+	return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+			 uint16_t address, uint8_t *recv, int recv_bytes)
+{
+	uint8_t msg[4];
+	int msg_bytes;
+	uint8_t reply[20];
+	int reply_bytes;
+	uint8_t ack;
+	int ret;
+
+	msg[0] = AUX_NATIVE_READ << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = recv_bytes - 1;
+
+	msg_bytes = 4;
+	reply_bytes = recv_bytes + 1;
+
+	for (;;) {
+		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret == 0)
+			return -EPROTO;
+		if (ret < 0)
+			return ret;
+		ack = reply[0];
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+			memcpy(recv, reply + 1, ret - 1);
+			return ret - 1;
+		}
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+}
+
+static int
+cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		    uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct cdv_intel_dp *intel_dp = container_of(adapter,
+						struct cdv_intel_dp,
+						adapter);
+	struct psb_intel_encoder *encoder = intel_dp->encoder;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	unsigned retry;
+	int msg_bytes;
+	int reply_bytes;
+	int ret;
+
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[0] = AUX_I2C_READ << 4;
+	else
+		msg[0] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[0] |= AUX_I2C_MOT << 4;
+
+	msg[1] = address >> 8;
+	msg[2] = address;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[3] = 0;
+		msg[4] = write_byte;
+		msg_bytes = 5;
+		reply_bytes = 1;
+		break;
+	case MODE_I2C_READ:
+		msg[3] = 0;
+		msg_bytes = 4;
+		reply_bytes = 2;
+		break;
+	default:
+		msg_bytes = 3;
+		reply_bytes = 1;
+		break;
+	}
+
+	for (retry = 0; retry < 5; retry++) {
+		ret = cdv_intel_dp_aux_ch(encoder,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+
+		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			udelay(100);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+				  reply[0]);
+			return -EREMOTEIO;
+		}
+
+		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ) {
+				*read_byte = reply[1];
+			}
+			return reply_bytes - 1;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_i2c nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_i2c defer\n");
+			udelay(100);
+			break;
+		default:
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+			return -EREMOTEIO;
+		}
+	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EREMOTEIO;
+}
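+
+/*
+ * Illustrative message layout (assuming the historical drm_dp_helper.h
+ * values AUX_I2C_READ = 0x1 and AUX_I2C_MOT = 0x4): a MODE_I2C_READ of one
+ * EDID byte at i2c address 0x50 with MOT set sends
+ * msg = { 0x50, 0x00, 0x50, 0x00 } and expects a two byte reply, the first
+ * byte carrying the AUX/I2C ack bits and the second the data byte.
+ */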
+
+static int
+cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	DRM_DEBUG_KMS("i2c_init %s\n", name);
+
+	intel_dp->algo.running = false;
+	intel_dp->algo.address = 0;
+	intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
+
+	memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+	intel_dp->adapter.owner = THIS_MODULE;
+	intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+	intel_dp->adapter.algo_data = &intel_dp->algo;
+	intel_dp->adapter.dev.parent = &connector->base.kdev;
+
+	if (is_edp(encoder))
+		cdv_intel_edp_panel_vdd_on(encoder);
+	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+	if (is_edp(encoder))
+		cdv_intel_edp_panel_vdd_off(encoder);
+	
+	return ret;
+}
+
+void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+	struct drm_display_mode *adjusted_mode)
+{
+	adjusted_mode->hdisplay = fixed_mode->hdisplay;
+	adjusted_mode->hsync_start = fixed_mode->hsync_start;
+	adjusted_mode->hsync_end = fixed_mode->hsync_end;
+	adjusted_mode->htotal = fixed_mode->htotal;
+
+	adjusted_mode->vdisplay = fixed_mode->vdisplay;
+	adjusted_mode->vsync_start = fixed_mode->vsync_start;
+	adjusted_mode->vsync_end = fixed_mode->vsync_end;
+	adjusted_mode->vtotal = fixed_mode->vtotal;
+
+	adjusted_mode->clock = fixed_mode->clock;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+static bool
+cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	int lane_count, clock;
+	int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
+	int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+	int refclock = mode->clock;
+	int bpp = 24;
+
+	if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
+		cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+		refclock = intel_dp->panel_fixed_mode->clock;
+		bpp = dev_priv->edp.bpp;
+	}
+
+	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+		for (clock = max_clock; clock >= 0; clock--) {
+			int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
+
+			if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
+				intel_dp->link_bw = bws[clock];
+				intel_dp->lane_count = lane_count;
+				adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+				DRM_DEBUG_KMS("Display port link bw %02x lane "
+						"count %d clock %d\n",
+				       intel_dp->link_bw, intel_dp->lane_count,
+				       adjusted_mode->clock);
+				return true;
+			}
+		}
+	}
+	if (is_edp(intel_encoder)) {
+		/* okay we failed just pick the highest */
+		intel_dp->lane_count = max_lane_count;
+		intel_dp->link_bw = bws[max_clock];
+		adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+			      "count %d clock %d\n",
+			      intel_dp->link_bw, intel_dp->lane_count,
+			      adjusted_mode->clock);
+
+		return true;
+	}
+	return false;
+}
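+
+/*
+ * Worked example of the search above: a 148500 kHz mode at 24bpp needs
+ * 445500 units of bandwidth; one lane offers 153900 (1.62GHz) or 256500
+ * (2.7GHz), neither enough, but two lanes at 2.7GHz offer 513000, so the
+ * loop settles on lane_count = 2 with link_bw = DP_LINK_BW_2_7 and sets
+ * adjusted_mode->clock to 270000.
+ */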
+
+struct cdv_intel_dp_m_n {
+	uint32_t	tu;
+	uint32_t	gmch_m;
+	uint32_t	gmch_n;
+	uint32_t	link_m;
+	uint32_t	link_n;
+};
+
+static void
+cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+	/*
+	 * Instead of shifting both values down until they fit, rescale the
+	 * ratio to a fixed denominator of 0x800000 (2^23) so that both
+	 * values always fit the 24-bit M/N registers:
+	 * num = num * 2^23 / den, den = 2^23.
+	 */
+	uint64_t value = (uint64_t)*num * 0x800000;
+
+	do_div(value, *den);
+	*num = value;
+	*den = 0x800000;
+}
+
+static void
+cdv_intel_dp_compute_m_n(int bpp,
+		     int nlanes,
+		     int pixel_clock,
+		     int link_clock,
+		     struct cdv_intel_dp_m_n *m_n)
+{
+	m_n->tu = 64;
+	m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
+	m_n->gmch_n = link_clock * nlanes;
+	cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+	m_n->link_m = pixel_clock;
+	m_n->link_n = link_clock;
+	cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
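+
+/*
+ * Illustration: for a 148500 kHz mode at 24bpp on a 2 lane 2.7GHz link,
+ * gmch_m/gmch_n starts as 445500/540000 and link_m/link_n as
+ * 148500/270000 = 0.55; after cdv_intel_reduce_ratio() each ratio keeps
+ * its value but is expressed over a 2^23 denominator (link_m becomes
+ * roughly 0.55 * 2^23 = 4613734), which is what the M/N registers expect.
+ */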
+
+void
+cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_encoder *encoder;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	int lane_count = 4, bpp = 24;
+	struct cdv_intel_dp_m_n m_n;
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * Find the lane count in the intel_encoder private
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+		struct psb_intel_encoder *intel_encoder;
+		struct cdv_intel_dp *intel_dp;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		intel_encoder = to_psb_intel_encoder(encoder);
+		intel_dp = intel_encoder->dev_priv;
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+			lane_count = intel_dp->lane_count;
+			break;
+		} else if (is_edp(intel_encoder)) {
+			lane_count = intel_dp->lane_count;
+			bpp = dev_priv->edp.bpp;
+			break;
+		}
+	}
+
+	/*
+	 * Compute the GMCH and Link ratios. The '3' here is
+	 * the number of bytes_per_pixel post-LUT, which we always
+	 * set up for 8-bits of R/G/B, or 3 bytes total.
+	 */
+	cdv_intel_dp_compute_m_n(bpp, lane_count,
+			     mode->clock, adjusted_mode->clock, &m_n);
+
+	REG_WRITE(PIPE_GMCH_DATA_M(pipe),
+		   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+		   m_n.gmch_m);
+	REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+	REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+	REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+}
+
+static void
+cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct drm_crtc *crtc = encoder->crtc;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct drm_device *dev = encoder->dev;
+
+	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	intel_dp->DP |= intel_dp->color_range;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		intel_dp->DP |= DP_SYNC_HS_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		intel_dp->DP |= DP_SYNC_VS_HIGH;
+
+	intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+	switch (intel_dp->lane_count) {
+	case 1:
+		intel_dp->DP |= DP_PORT_WIDTH_1;
+		break;
+	case 2:
+		intel_dp->DP |= DP_PORT_WIDTH_2;
+		break;
+	case 4:
+		intel_dp->DP |= DP_PORT_WIDTH_4;
+		break;
+	}
+	if (intel_dp->has_audio)
+		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp->link_configuration[0] = intel_dp->link_bw;
+	intel_dp->link_configuration[1] = intel_dp->lane_count;
+
+	/*
+	 * Check for DPCD version > 1.1 and enhanced framing support
+	 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+		intel_dp->DP |= DP_ENHANCED_FRAMING;
+	}
+
+	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
+	if (intel_crtc->pipe == 1)
+		intel_dp->DP |= DP_PIPEB_SELECT;
+
+	REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
+	DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
+	if (is_edp(intel_encoder)) {
+		uint32_t pfit_control;
+		cdv_intel_edp_panel_on(intel_encoder);
+
+		if (mode->hdisplay != adjusted_mode->hdisplay ||
+			    mode->vdisplay != adjusted_mode->vdisplay)
+			pfit_control = PFIT_ENABLE;
+		else
+			pfit_control = 0;
+
+		pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+		REG_WRITE(PFIT_CONTROL, pfit_control);
+	}
+}
+
+
+/* If the sink supports it, try to set the power state appropriately */
+static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret, i;
+
+	/* Should have a valid DPCD by this point */
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
+						  DP_SET_POWER_D3);
+		if (ret != 1)
+			DRM_DEBUG_DRIVER("failed to write sink power state\n");
+	} else {
+		/*
+		 * When turning on, retry a few times, 1ms apart, to give
+		 * the sink time to wake up.
+		 */
+		for (i = 0; i < 3; i++) {
+			ret = cdv_intel_dp_aux_native_write_1(encoder,
+							  DP_SET_POWER,
+							  DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			udelay(1000);
+		}
+	}
+}
+
+static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	int edp = is_edp(intel_encoder);
+
+	if (edp) {
+		cdv_intel_edp_backlight_off(intel_encoder);
+		cdv_intel_edp_panel_off(intel_encoder);
+		cdv_intel_edp_panel_vdd_on(intel_encoder);
+	}
+	/* Wake up the sink first */
+	cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
+	cdv_intel_dp_link_down(intel_encoder);
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(intel_encoder);
+}
+
+static void cdv_intel_dp_commit(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	int edp = is_edp(intel_encoder);
+
+	if (edp)
+		cdv_intel_edp_panel_on(intel_encoder);
+	cdv_intel_dp_start_link_train(intel_encoder);
+	cdv_intel_dp_complete_link_train(intel_encoder);
+	if (edp)
+		cdv_intel_edp_backlight_on(intel_encoder);
+}
+
+static void
+cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct drm_device *dev = encoder->dev;
+	uint32_t dp_reg = REG_READ(intel_dp->output_reg);
+	int edp = is_edp(intel_encoder);
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		if (edp) {
+			cdv_intel_edp_backlight_off(intel_encoder);
+			cdv_intel_edp_panel_vdd_on(intel_encoder);
+		}
+		cdv_intel_dp_sink_dpms(intel_encoder, mode);
+		cdv_intel_dp_link_down(intel_encoder);
+		if (edp) {
+			cdv_intel_edp_panel_vdd_off(intel_encoder);
+			cdv_intel_edp_panel_off(intel_encoder);
+		}
+	} else {
+		if (edp)
+			cdv_intel_edp_panel_on(intel_encoder);
+		cdv_intel_dp_sink_dpms(intel_encoder, mode);
+		if (!(dp_reg & DP_PORT_EN)) {
+			cdv_intel_dp_start_link_train(intel_encoder);
+			cdv_intel_dp_complete_link_train(intel_encoder);
+		}
+		if (edp)
+			cdv_intel_edp_backlight_on(intel_encoder);
+	}
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+			       uint8_t *recv, int recv_bytes)
+{
+	int ret, i;
+
+	/*
+	 * Sinks are *supposed* to come up within 1ms from an off state,
+	 * but we're also supposed to retry 3 times per the spec.
+	 */
+	for (i = 0; i < 3; i++) {
+		ret = cdv_intel_dp_aux_native_read(encoder, address, recv,
+					       recv_bytes);
+		if (ret == recv_bytes)
+			return true;
+		udelay(1000);
+	}
+
+	return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	return cdv_intel_dp_aux_native_read_retry(encoder,
+					      DP_LANE0_1_STATUS,
+					      intel_dp->link_status,
+					      DP_LINK_STATUS_SIZE);
+}
+
+static uint8_t
+cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+		     int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+				 int lane)
+{
+	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int	    s = ((lane & 1) ?
+			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+				      int lane)
+{
+	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int	    s = ((lane & 1) ?
+			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
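+
+/*
+ * Per the DP spec each ADJUST_REQUEST byte in the DPCD packs two lanes:
+ * bits 1:0 and 3:2 hold the requested voltage swing and pre-emphasis for
+ * the even lane, bits 5:4 and 7:6 the same for the odd lane, which is why
+ * the two helpers above pick the byte with (lane >> 1) and the nibble with
+ * (lane & 1).
+ */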
+
+
+#if 0
+static char	*voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char	*pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char	*link_train_names[] = {
+	"pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+#define CDV_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_1200
+/*
+static uint8_t
+cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+	case DP_TRAIN_VOLTAGE_SWING_400:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_600:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_800:
+		return DP_TRAIN_PRE_EMPHASIS_3_5;
+	case DP_TRAIN_VOLTAGE_SWING_1200:
+	default:
+		return DP_TRAIN_PRE_EMPHASIS_0;
+	}
+}
+*/
+static void
+cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint8_t v = 0;
+	uint8_t p = 0;
+	int lane;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+		uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+	
+	if (v >= CDV_DP_VOLTAGE_MAX)
+		v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
+		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+		
+	for (lane = 0; lane < 4; lane++)
+		intel_dp->train_set[lane] = v | p;
+}
+
+
+static uint8_t
+cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+		      int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return (l >> s) & 0xf;
+}
+
+/* Check whether clock recovery is done on all channels */
+static bool
+cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+	int lane;
+	uint8_t lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = cdv_intel_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+			 DP_LANE_CHANNEL_EQ_DONE|\
+			 DP_LANE_SYMBOL_LOCKED)
+static bool
+cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint8_t lane_align;
+	uint8_t lane_status;
+	int lane;
+
+	lane_align = cdv_intel_dp_link_status(intel_dp->link_status,
+					  DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane);
+		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+
+static bool
+cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+			uint32_t dp_reg_value,
+			uint8_t dp_train_pat)
+{
+	
+	struct drm_device *dev = encoder->base.dev;
+	int ret;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+	REG_WRITE(intel_dp->output_reg, dp_reg_value);
+	REG_READ(intel_dp->output_reg);
+
+	ret = cdv_intel_dp_aux_native_write_1(encoder,
+				    DP_TRAINING_PATTERN_SET,
+				    dp_train_pat);
+
+	if (ret != 1) {
+		DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
+				dp_train_pat);
+		return false;
+	}
+
+	return true;
+}
+
+
+static bool
+cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
+			uint8_t dp_train_pat)
+{
+	
+	int ret;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+	ret = cdv_intel_dp_aux_native_write(encoder,
+					DP_TRAINING_LANE0_SET,
+					intel_dp->train_set,
+					intel_dp->lane_count);
+
+	if (ret != intel_dp->lane_count) {
+		DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n",
+				intel_dp->train_set[0], intel_dp->lane_count);
+		return false;
+	}
+	return true;
+}
+
+static void
+cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	struct ddi_regoff *ddi_reg;
+	int vswing, premph, index;
+
+	if (intel_dp->output_reg == DP_B)
+		ddi_reg = &ddi_DP_train_table[0];
+	else
+		ddi_reg = &ddi_DP_train_table[1];
+
+	vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
+	premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
+				DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+	if (vswing + premph > 3)
+		return;
+#ifdef CDV_FAST_LINK_TRAIN
+	return;
+#endif
+	DRM_DEBUG_KMS("Programming vswing %d, pre-emphasis %d\n", vswing, premph);
+	cdv_sb_reset(dev);
+	/* ;Swing voltage programming
+        ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
+	cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
+
+	/* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
+	cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
+
+	/* ;gfx_dpio_set_reg(0x8148, 0x55338954)
+	 * The VSwing_PreEmph table is also considered based on the vswing/premp
+	 */
+	index = (vswing + premph) * 2;
+	if (premph == 1 && vswing == 1) {
+		cdv_sb_write(dev, ddi_reg->VSwing2, 0x055738954);
+	} else
+		cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
+
+	/* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
+	if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
+		cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
+	else
+		cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
+
+	/* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
+	/* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
+
+	/* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
+	cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
+
+	/* ;Pre emphasis programming
+	 * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
+	 */
+	cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
+
+	/* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
+	index = 2 * premph + 1;
+	cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
+	return;	
+}
+
+
+/* Enable corresponding port and start training pattern 1 */
+static void
+cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int i;
+	uint8_t voltage;
+	bool clock_recovery = false;
+	int tries;
+	u32 reg;
+	uint32_t DP = intel_dp->DP;
+
+	DP |= DP_PORT_EN;
+	DP &= ~DP_LINK_TRAIN_MASK;
+		
+	reg = DP;	
+	reg |= DP_LINK_TRAIN_PAT_1;
+	/* Enable output, wait for it to become active */
+	REG_WRITE(intel_dp->output_reg, reg);
+	REG_READ(intel_dp->output_reg);
+	psb_intel_wait_for_vblank(dev);
+
+	DRM_DEBUG_KMS("Link config\n");
+	/* Write the link configuration data */
+	cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
+				  intel_dp->link_configuration,
+				  2);
+
+	memset(intel_dp->train_set, 0, 4);
+	voltage = 0;
+	tries = 0;
+	clock_recovery = false;
+
+	DRM_DEBUG_KMS("Start train\n");
+	reg = DP | DP_LINK_TRAIN_PAT_1;
+
+	for (;;) {
+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+				intel_dp->train_set[0],
+				intel_dp->link_configuration[0],
+				intel_dp->link_configuration[1]);
+
+		if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
+			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
+		}
+		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+		/* Set training pattern 1 */
+
+		cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
+
+		udelay(200);
+		if (!cdv_intel_dp_get_link_status(encoder))
+			break;
+
+		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+		if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("PT1 train is done\n");
+			clock_recovery = true;
+			break;
+		}
+
+		/* Check to see if we've tried the max voltage */
+		for (i = 0; i < intel_dp->lane_count; i++)
+			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		if (i == intel_dp->lane_count)
+			break;
+
+		/* Check to see if we've tried the same voltage 5 times */
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++tries;
+			if (tries == 5)
+				break;
+		} else
+			tries = 0;
+		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new intel_dp->train_set as requested by target */
+		cdv_intel_get_adjust_train(encoder);
+
+	}
+
+	if (!clock_recovery)
+		DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n", intel_dp->train_set[0]);
+
+	intel_dp->DP = DP;
+}
+
+static void
+cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	bool channel_eq = false;
+	int tries, cr_tries;
+	u32 reg;
+	uint32_t DP = intel_dp->DP;
+
+	/* channel equalization */
+	tries = 0;
+	cr_tries = 0;
+	channel_eq = false;
+
+	DRM_DEBUG_KMS("\n");
+	reg = DP | DP_LINK_TRAIN_PAT_2;
+
+	for (;;) {
+
+		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+				intel_dp->train_set[0],
+				intel_dp->link_configuration[0],
+				intel_dp->link_configuration[1]);
+		/* channel eq pattern */
+
+		if (!cdv_intel_dp_set_link_train(encoder, reg,
+					     DP_TRAINING_PATTERN_2)) {
+			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
+		}
+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			cdv_intel_dp_link_down(encoder);
+			break;
+		}
+
+		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+
+		cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
+
+		udelay(1000);
+		if (!cdv_intel_dp_get_link_status(encoder))
+			break;
+
+		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+		/* Make sure clock is still ok */
+		if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			cdv_intel_dp_start_link_train(encoder);
+			cr_tries++;
+			continue;
+		}
+
+		if (cdv_intel_channel_eq_ok(encoder)) {
+			DRM_DEBUG_KMS("PT2 train is done\n");
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			cdv_intel_dp_link_down(encoder);
+			cdv_intel_dp_start_link_train(encoder);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
+
+		/* Compute new intel_dp->train_set as requested by target */
+		cdv_intel_get_adjust_train(encoder);
+		++tries;
+
+	}
+
+	reg = DP | DP_LINK_TRAIN_OFF;
+
+	REG_WRITE(intel_dp->output_reg, reg);
+	REG_READ(intel_dp->output_reg);
+	cdv_intel_dp_aux_native_write_1(encoder,
+				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint32_t DP = intel_dp->DP;
+
+	if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+	DP &= ~DP_LINK_TRAIN_MASK;
+	REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+	REG_READ(intel_dp->output_reg);
+
+	msleep(17);
+
+	REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+	REG_READ(intel_dp->output_reg);
+}
+
+static enum drm_connector_status
+cdv_dp_detect(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	enum drm_connector_status status;
+
+	status = connector_status_disconnected;
+	if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd,
+					 sizeof(intel_dp->dpcd)) == sizeof(intel_dp->dpcd)) {
+		if (intel_dp->dpcd[DP_DPCD_REV] != 0)
+			status = connector_status_connected;
+	}
+	if (status == connector_status_connected)
+		DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+			intel_dp->dpcd[0], intel_dp->dpcd[1],
+			intel_dp->dpcd[2], intel_dp->dpcd[3]);
+	return status;
+}
+
+/*
+ * Detect a DP/eDP sink by reading its DPCD over the AUX channel.
+ *
+ * Returns connector_status_connected if a sink with a valid DPCD
+ * responds, connector_status_disconnected otherwise.
+ */
+static enum drm_connector_status
+cdv_intel_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	enum drm_connector_status status;
+	struct edid *edid = NULL;
+	int edp = is_edp(encoder);
+
+	intel_dp->has_audio = false;
+
+	if (edp)
+		cdv_intel_edp_panel_vdd_on(encoder);
+	status = cdv_dp_detect(encoder);
+	if (status != connector_status_connected) {
+		if (edp)
+			cdv_intel_edp_panel_vdd_off(encoder);
+		return status;
+	}
+
+	if (intel_dp->force_audio) {
+		intel_dp->has_audio = intel_dp->force_audio > 0;
+	} else {
+		edid = drm_get_edid(connector, &intel_dp->adapter);
+		if (edid) {
+			intel_dp->has_audio = drm_detect_monitor_audio(edid);
+			kfree(edid);
+		}
+	}
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return connector_status_connected;
+}
+
+static int cdv_intel_dp_get_modes(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct edid *edid = NULL;
+	int ret = 0;
+	int edp = is_edp(intel_encoder);
+
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	if (is_edp(intel_encoder)) {
+		struct drm_device *dev = connector->dev;
+		struct drm_psb_private *dev_priv = dev->dev_private;
+		
+		cdv_intel_edp_panel_vdd_off(intel_encoder);
+		if (ret) {
+			if (edp && !intel_dp->panel_fixed_mode) {
+				struct drm_display_mode *newmode;
+				list_for_each_entry(newmode, &connector->probed_modes,
+					    head) {
+					if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+						intel_dp->panel_fixed_mode =
+							drm_mode_duplicate(dev, newmode);
+						break;
+					}
+				}
+			}
+
+			return ret;
+		}
+		if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+			intel_dp->panel_fixed_mode =
+				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+			if (intel_dp->panel_fixed_mode) {
+				intel_dp->panel_fixed_mode->type |=
+					DRM_MODE_TYPE_PREFERRED;
+			}
+		}
+		if (intel_dp->panel_fixed_mode != NULL) {
+			struct drm_display_mode *mode;
+			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+			drm_mode_probed_add(connector, mode);
+			return 1;
+		}
+	}
+
+	return ret;
+}
+
+static bool
+cdv_intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	struct edid *edid;
+	bool has_audio = false;
+	int edp = is_edp(encoder);
+
+	if (edp)
+		cdv_intel_edp_panel_vdd_on(encoder);
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+		kfree(edid);
+	}
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return has_audio;
+}
+
+static int
+cdv_intel_dp_set_property(struct drm_connector *connector,
+		      struct drm_property *property,
+		      uint64_t val)
+{
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	ret = drm_connector_property_set_value(connector, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
+			return 0;
+
+		intel_dp->force_audio = i;
+
+		if (i == 0)
+			has_audio = cdv_intel_dp_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_dp->has_audio)
+			return 0;
+
+		intel_dp->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!intel_dp->color_range)
+			return 0;
+
+		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (encoder->base.crtc) {
+		struct drm_crtc *crtc = encoder->base.crtc;
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y,
+					 crtc->fb);
+	}
+
+	return 0;
+}
+
+static void
+cdv_intel_dp_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+
+	if (is_edp(psb_intel_encoder)) {
+	/*	cdv_intel_panel_destroy_backlight(connector->dev); */
+		if (intel_dp->panel_fixed_mode) {
+			kfree(intel_dp->panel_fixed_mode);
+			intel_dp->panel_fixed_mode = NULL;
+		}
+	}
+	i2c_del_adapter(&intel_dp->adapter);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
+	.dpms = cdv_intel_dp_dpms,
+	.mode_fixup = cdv_intel_dp_mode_fixup,
+	.prepare = cdv_intel_dp_prepare,
+	.mode_set = cdv_intel_dp_mode_set,
+	.commit = cdv_intel_dp_commit,
+};
+
+static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = cdv_intel_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = cdv_intel_dp_set_property,
+	.destroy = cdv_intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
+	.get_modes = cdv_intel_dp_get_modes,
+	.mode_valid = cdv_intel_dp_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
+	.destroy = cdv_intel_dp_encoder_destroy,
+};
+
+
+static void cdv_intel_dp_add_properties(struct drm_connector *connector)
+{
+	cdv_intel_attach_force_audio_property(connector);
+	cdv_intel_attach_broadcast_rgb_property(connector);
+}
+
+/* check the VBT to see whether the eDP is on DP-D port */
+static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i;
+
+	if (!dev_priv->child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+
+		if (p_child->dvo_port == PORT_IDPC &&
+		    p_child->device_type == DEVICE_TYPE_eDP)
+			return true;
+	}
+	return false;
+}
+
+/* Cedarview display clock gating
+
+   We need to disable the display clock gating to get correct behaviour
+   while enabling DP/eDP. TODO - investigate whether we can turn it back
+   on after enabling */
+static void cdv_disable_intel_clock_gating(struct drm_device *dev)
+{
+	u32 reg_value;
+	reg_value = REG_READ(DSPCLK_GATE_D);
+
+	reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
+			DPUNIT_PIPEA_GATE_DISABLE |
+			DPCUNIT_CLOCK_GATE_DISABLE |
+			DPLSUNIT_CLOCK_GATE_DISABLE |
+			DPOUNIT_CLOCK_GATE_DISABLE |
+			DPIOUNIT_CLOCK_GATE_DISABLE);
+
+	REG_WRITE(DSPCLK_GATE_D, reg_value);
+
+	udelay(500);
+}
+
+void
+cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct cdv_intel_dp *intel_dp;
+	const char *name = NULL;
+	int type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+	if (!psb_intel_encoder)
+		return;
+	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+	if (!psb_intel_connector)
+		goto err_connector;
+	intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
+	if (!intel_dp)
+		goto err_priv;
+
+	if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
+		type = DRM_MODE_CONNECTOR_eDP;
+
+	connector = &psb_intel_connector->base;
+	encoder = &psb_intel_encoder->base;
+
+	drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
+	drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+
+	if (type == DRM_MODE_CONNECTOR_DisplayPort)
+		psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+	else
+		psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	psb_intel_encoder->dev_priv = intel_dp;
+	intel_dp->encoder = psb_intel_encoder;
+	intel_dp->output_reg = output_reg;
+
+	drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
+	drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
+
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	drm_sysfs_connector_add(connector);
+
+	/* Set up the DDC bus. */
+	switch (output_reg) {
+	case DP_B:
+		name = "DPDDC-B";
+		psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+		break;
+	case DP_C:
+		name = "DPDDC-C";
+		psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+		break;
+	}
+
+	cdv_disable_intel_clock_gating(dev);
+
+	cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+	/* FIXME: check for and handle i2c init failure */
+	cdv_intel_dp_add_properties(connector);
+
+	if (is_edp(psb_intel_encoder)) {
+		int ret;
+		struct edp_power_seq cur;
+		u32 pp_on, pp_off, pp_div;
+		u32 pwm_ctrl;
+
+		pp_on = REG_READ(PP_CONTROL);
+		pp_on &= ~PANEL_UNLOCK_MASK;
+		pp_on |= PANEL_UNLOCK_REGS;
+
+		REG_WRITE(PP_CONTROL, pp_on);
+
+		pwm_ctrl = REG_READ(BLC_PWM_CTL2);
+		pwm_ctrl |= PWM_PIPE_B;
+		REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
+
+		pp_on = REG_READ(PP_ON_DELAYS);
+		pp_off = REG_READ(PP_OFF_DELAYS);
+		pp_div = REG_READ(PP_DIVISOR);
+
+		/* Pull timing values out of registers */
+		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+			PANEL_POWER_UP_DELAY_SHIFT;
+
+		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+			PANEL_LIGHT_ON_DELAY_SHIFT;
+
+		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+			PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+			PANEL_POWER_DOWN_DELAY_SHIFT;
+
+		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+			       PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+		intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
+		intel_dp->backlight_on_delay = cur.t8 / 10;
+		intel_dp->backlight_off_delay = cur.t9 / 10;
+		intel_dp->panel_power_down_delay = cur.t10 / 10;
+		intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
+
+		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+			      intel_dp->panel_power_cycle_delay);
+
+		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+		cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
+		ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+					       intel_dp->dpcd,
+					       sizeof(intel_dp->dpcd));
+		cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+		if (ret == 0) {
+			/* if this fails, presume the device is a ghost */
+			DRM_INFO("failed to retrieve link info, disabling eDP\n");
+			cdv_intel_dp_encoder_destroy(encoder);
+			/* cdv_intel_dp_destroy() frees the connector, so only
+			   the private data and the encoder wrapper remain */
+			cdv_intel_dp_destroy(connector);
+			kfree(intel_dp);
+			goto err_connector;
+		} else {
+			DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+				intel_dp->dpcd[0], intel_dp->dpcd[1],
+				intel_dp->dpcd[2], intel_dp->dpcd[3]);
+		}
+		/* The CDV reference driver moves panel backlight setup into the
+		   displays that have a backlight: this is a good idea and one we
+		   should probably adopt, but we need to migrate all the drivers
+		   before we can do that */
+		/* cdv_intel_panel_setup_backlight(dev); */
+	}
+	return;
+
+err_priv:
+	kfree(psb_intel_connector);
+err_connector:
+	kfree(psb_intel_encoder);
+}
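
For reference, the eDP branch of cdv_intel_dp_init() above decodes the panel power
sequencing registers. The divisions by 10 and the (t11_t12 - 1) * 100 scaling suggest
the hardware stores T1/T3, T8, T9 and T10 in units of 100 us and the power cycle delay
in units of 100 ms; treat that as an assumption. The standalone sketch below (not part
of the patch) decodes a made-up pair of register values under that assumption, using
the same masks and shifts added to psb_intel_reg.h.

#include <stdio.h>

#define PANEL_POWER_UP_DELAY_MASK	0x1fff0000
#define PANEL_POWER_UP_DELAY_SHIFT	16
#define PANEL_LIGHT_ON_DELAY_MASK	0x1fff
#define PANEL_POWER_CYCLE_DELAY_MASK	0x1f

int main(void)
{
	/* Hypothetical raw register reads, not values from real hardware */
	unsigned int pp_on = (2100 << PANEL_POWER_UP_DELAY_SHIFT) | 500;
	unsigned int pp_div = 6;

	unsigned int t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
				PANEL_POWER_UP_DELAY_SHIFT;
	unsigned int t8 = pp_on & PANEL_LIGHT_ON_DELAY_MASK;
	unsigned int t11_t12 = pp_div & PANEL_POWER_CYCLE_DELAY_MASK;

	printf("panel power up delay %u ms\n", t1_t3 / 10);		/* 210 */
	printf("backlight on delay   %u ms\n", t8 / 10);		/* 50 */
	printf("power cycle delay    %u ms\n", (t11_t12 - 1) * 100);	/* 500 */
	return 0;
}
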
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index a86f87b9ddde..7272a461edfe 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -139,8 +139,6 @@ static enum drm_connector_status cdv_hdmi_detect(
 {
 	struct psb_intel_encoder *psb_intel_encoder =
 					psb_intel_attached_encoder(connector);
-	struct psb_intel_connector *psb_intel_connector =
-					to_psb_intel_connector(connector);
 	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
 	struct edid *edid = NULL;
 	enum drm_connector_status status = connector_status_disconnected;
@@ -157,8 +155,6 @@ static enum drm_connector_status cdv_hdmi_detect(
 			hdmi_priv->has_hdmi_audio =
 						drm_detect_monitor_audio(edid);
 		}
-
-		psb_intel_connector->base.display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 	return status;
@@ -352,9 +348,11 @@ void cdv_hdmi_init(struct drm_device *dev,
 	switch (reg) {
 	case SDVOB:
 		ddc_bus = GPIOE;
+		psb_intel_encoder->ddi_select = DDI0_SELECT;
 		break;
 	case SDVOC:
 		ddc_bus = GPIOD;
+		psb_intel_encoder->ddi_select = DDI1_SELECT;
 		break;
 	default:
 		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index c7f9468b74ba..b362dd39bf5a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -506,16 +506,8 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 							property,
 							value))
 			return -1;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct drm_psb_private *dev_priv =
-						encoder->dev->dev_private;
-			struct backlight_device *bd =
-						dev_priv->backlight_device;
-			bd->props.brightness = value;
-			backlight_update_status(bd);
-#endif
-		}
+		else
+			gma_backlight_set(encoder->dev, value);
 	} else if (!strcmp(property->name, "DPMS") && encoder) {
 		struct drm_encoder_helper_funcs *helpers =
 					encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 5732b5702e1c..884ba73ac6ce 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -764,6 +764,13 @@ static void psb_setup_outputs(struct drm_device *dev)
 		        crtc_mask = dev_priv->ops->hdmi_mask;
 			clone_mask = (1 << INTEL_OUTPUT_HDMI);
 			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			crtc_mask = (1 << 0) | (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+			break;
+		case INTEL_OUTPUT_EDP:
+			crtc_mask = (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_EDP);
+			break;
 		}
 		encoder->possible_crtcs = crtc_mask;
 		encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index f3a1ae8eb77b..eefd6cc5b80d 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -36,7 +36,12 @@ int psb_gem_init_object(struct drm_gem_object *obj)
 void psb_gem_free_object(struct drm_gem_object *obj)
 {
 	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
-	drm_gem_object_release_wrap(obj);
+
+	/* Remove the list map if one is present */
+	if (obj->map_list.map)
+		drm_gem_free_mmap_offset(obj);
+	drm_gem_object_release(obj);
+
 	/* This must occur last as it frees up the memory of the GEM object */
 	psb_gtt_free_range(obj->dev, gtt);
 }
@@ -77,7 +82,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
 
 	/* Make it mmapable */
 	if (!obj->map_list.map) {
-		ret = gem_create_mmap_offset(obj);
+		ret = drm_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
deleted file mode 100644
index 3c17634f6061..000000000000
--- a/drivers/gpu/drm/gma500/gem_glue.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- **************************************************************************/
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include "gem_glue.h"
-
-void drm_gem_object_release_wrap(struct drm_gem_object *obj)
-{
-	/* Remove the list map if one is present */
-	if (obj->map_list.map) {
-		struct drm_gem_mm *mm = obj->dev->mm_private;
-		struct drm_map_list *list = &obj->map_list;
-		drm_ht_remove_item(&mm->offset_hash, &list->hash);
-		drm_mm_put_block(list->file_offset_node);
-		kfree(list->map);
-		list->map = NULL;
-	}
-	drm_gem_object_release(obj);
-}
-
-/**
- *	gem_create_mmap_offset		-	invent an mmap offset
- *	@obj: our object
- *
- *	Standard implementation of offset generation for mmap as is
- *	duplicated in several drivers. This belongs in GEM.
- */
-int gem_create_mmap_offset(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_local_map *map;
-	int ret;
-
-	list = &obj->map_list;
-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-	if (list->map == NULL)
-		return -ENOMEM;
-	map = list->map;
-	map->type = _DRM_GEM;
-	map->size = obj->size;
-	map->handle = obj;
-
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-					obj->size / PAGE_SIZE, 0, 0);
-	if (!list->file_offset_node) {
-		dev_err(dev->dev, "failed to allocate offset for bo %d\n",
-								obj->name);
-		ret = -ENOSPC;
-		goto free_it;
-	}
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-					obj->size / PAGE_SIZE, 0);
-	if (!list->file_offset_node) {
-		ret = -ENOMEM;
-		goto free_it;
-	}
-	list->hash.key = list->file_offset_node->start;
-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-	if (ret) {
-		dev_err(dev->dev, "failed to add to map hash\n");
-		goto free_mm;
-	}
-	return 0;
-
-free_mm:
-	drm_mm_put_block(list->file_offset_node);
-free_it:
-	kfree(list->map);
-	list->map = NULL;
-	return ret;
-}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
deleted file mode 100644
index ce5ce30f74db..000000000000
--- a/drivers/gpu/drm/gma500/gem_glue.h
+++ /dev/null
@@ -1,2 +0,0 @@
-extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
-extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index a837ee97787c..403fffb03abd 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -54,6 +54,98 @@ static void *find_section(struct bdb_header *bdb, int section_id)
 	return NULL;
 }
 
+static void
+parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_edp *edp;
+	struct edp_power_seq *edp_pps;
+	struct edp_link_params *edp_link_params;
+	uint8_t	panel_type;
+
+	edp = find_section(bdb, BDB_EDP);
+
+	dev_priv->edp.bpp = 18;
+	if (!edp) {
+		if (dev_priv->edp.support) {
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
+				      dev_priv->edp.bpp);
+		}
+		return;
+	}
+
+	panel_type = dev_priv->panel_type;
+	if (panel_type >= 16) {
+		/* No valid panel type was parsed from the VBT; don't index
+		   the per-panel tables with a bogus value */
+		DRM_DEBUG_KMS("Invalid VBT panel type %d for eDP\n", panel_type);
+		return;
+	}
+
+	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+	case EDP_18BPP:
+		dev_priv->edp.bpp = 18;
+		break;
+	case EDP_24BPP:
+		dev_priv->edp.bpp = 24;
+		break;
+	case EDP_30BPP:
+		dev_priv->edp.bpp = 30;
+		break;
+	}
+
+	/* Get the eDP sequencing and link info */
+	edp_pps = &edp->power_seqs[panel_type];
+	edp_link_params = &edp->link_params[panel_type];
+
+	dev_priv->edp.pps = *edp_pps;
+
+	DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+				dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, 
+				dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
+				dev_priv->edp.pps.t11_t12);
+
+	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+		DP_LINK_BW_1_62;
+	switch (edp_link_params->lanes) {
+	case 0:
+		dev_priv->edp.lanes = 1;
+		break;
+	case 1:
+		dev_priv->edp.lanes = 2;
+		break;
+	case 3:
+	default:
+		dev_priv->edp.lanes = 4;
+		break;
+	}
+	DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
+			dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);
+
+	switch (edp_link_params->preemphasis) {
+	case 0:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+		break;
+	case 1:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+		break;
+	case 2:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+		break;
+	case 3:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+		break;
+	}
+	switch (edp_link_params->vswing) {
+	case 0:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+		break;
+	case 1:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+		break;
+	case 2:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+		break;
+	case 3:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+		break;
+	}
+	DRM_DEBUG_KMS("VBT reports EDP: VSwing  %d, Preemph %d\n",
+			dev_priv->edp.vswing, dev_priv->edp.preemphasis);
+}
+
 static u16
 get_blocksize(void *p)
 {
@@ -154,6 +246,8 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
 		return;
 
 	dev_priv->lvds_dither = lvds_options->pixel_dither;
+	dev_priv->panel_type = lvds_options->panel_type;
+
 	if (lvds_options->panel_type == 0xff)
 		return;
 
@@ -340,6 +434,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
 	if (!driver)
 		return;
 
+	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+		dev_priv->edp.support = 1;
+
 	/* This bit means to use 96Mhz for DPLL_A or not */
 	if (driver->primary_lfp_id)
 		dev_priv->dplla_96mhz = true;
@@ -437,6 +534,9 @@ int psb_intel_init_bios(struct drm_device *dev)
 	size_t size;
 	int i;
 
+
+	dev_priv->panel_type = 0xff;
+
 	/* XXX Should this validation be moved to intel_opregion.c? */
 	if (dev_priv->opregion.vbt) {
 		struct vbt_header *vbt = dev_priv->opregion.vbt;
@@ -477,6 +577,7 @@ int psb_intel_init_bios(struct drm_device *dev)
 	parse_sdvo_device_mapping(dev_priv, bdb);
 	parse_device_mapping(dev_priv, bdb);
 	parse_backlight_data(dev_priv, bdb);
+	parse_edp(dev_priv, bdb);
 
 	if (bios)
 		pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 2e95523b84b1..c6267c98c9e7 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -23,6 +23,7 @@
 #define _I830_BIOS_H_
 
 #include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
 
 struct vbt_header {
 	u8 signature[20];		/**< Always starts with 'VBT$' */
@@ -93,6 +94,7 @@ struct vbios_data {
 #define BDB_SDVO_LVDS_PNP_IDS	 24
 #define BDB_SDVO_LVDS_POWER_SEQ	 25
 #define BDB_TV_OPTIONS		 26
+#define BDB_EDP			 27
 #define BDB_LVDS_OPTIONS	 40
 #define BDB_LVDS_LFP_DATA_PTRS	 41
 #define BDB_LVDS_LFP_DATA	 42
@@ -391,6 +393,11 @@ struct bdb_sdvo_lvds_options {
 	u8 panel_misc_bits_4;
 } __attribute__((packed));
 
+#define BDB_DRIVER_FEATURE_NO_LVDS		0
+#define BDB_DRIVER_FEATURE_INT_LVDS		1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS		2
+#define BDB_DRIVER_FEATURE_EDP			3
+
 struct bdb_driver_features {
 	u8 boot_dev_algorithm:1;
 	u8 block_display_switch:1;
@@ -431,6 +438,45 @@ struct bdb_driver_features {
 	u8 custom_vbt_version;
 } __attribute__((packed));
 
+#define EDP_18BPP	0
+#define EDP_24BPP	1
+#define EDP_30BPP	2
+#define EDP_RATE_1_62	0
+#define EDP_RATE_2_7	1
+#define EDP_LANE_1	0
+#define EDP_LANE_2	1
+#define EDP_LANE_4	3
+#define EDP_PREEMPHASIS_NONE	0
+#define EDP_PREEMPHASIS_3_5dB	1
+#define EDP_PREEMPHASIS_6dB	2
+#define EDP_PREEMPHASIS_9_5dB	3
+#define EDP_VSWING_0_4V		0
+#define EDP_VSWING_0_6V		1
+#define EDP_VSWING_0_8V		2
+#define EDP_VSWING_1_2V		3
+
+struct edp_power_seq {
+	u16 t1_t3;
+	u16 t8;
+	u16 t9;
+	u16 t10;
+	u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+	u8 rate:4;
+	u8 lanes:4;
+	u8 preemphasis:4;
+	u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+	struct edp_power_seq power_seqs[16];
+	u32 color_depth;
+	u32 sdrrs_msa_timing_delay;
+	struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
 extern int psb_intel_init_bios(struct drm_device *dev);
 extern void psb_intel_destroy_bios(struct drm_device *dev);
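
A minimal sketch (not part of the patch) of how parse_edp() above indexes the per-panel
fields of the bdb_edp block defined here: color_depth packs one 2-bit colour-depth code
per panel type, and the power_seqs/link_params arrays are indexed by the same panel_type.
The helper edp_bpp_for_panel() and the sample VBT word are hypothetical.

#include <stdio.h>

#define EDP_18BPP	0
#define EDP_24BPP	1
#define EDP_30BPP	2

static int edp_bpp_for_panel(unsigned int color_depth, unsigned int panel_type)
{
	switch ((color_depth >> (panel_type * 2)) & 3) {
	case EDP_24BPP:
		return 24;
	case EDP_30BPP:
		return 30;
	case EDP_18BPP:
	default:
		return 18;
	}
}

int main(void)
{
	/* Hypothetical VBT: panel 0 -> 18bpp, panel 1 -> 24bpp, panel 2 -> 30bpp */
	unsigned int color_depth = (EDP_30BPP << 4) | (EDP_24BPP << 2) | EDP_18BPP;

	printf("panel 1 uses %d bpp\n", edp_bpp_for_panel(color_depth, 1)); /* 24 */
	return 0;
}
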
 
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 5675d93b4205..32dba2ab53e1 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -299,17 +299,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 		if (drm_connector_property_set_value(connector, property,
 									value))
 			goto set_prop_error;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct backlight_device *psb_bd;
-
-			psb_bd = mdfld_get_backlight_device();
-			if (psb_bd) {
-				psb_bd->props.brightness = value;
-				mdfld_set_brightness(psb_bd);
-			}
-#endif
-		}
+		else
+			gma_backlight_set(encoder->dev, value);
 	}
 set_prop_done:
 	return 0;
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 64d18a37da40..a97e38e284fa 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,20 +118,20 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
 					dev_priv->platform_rev_id);
 }
 
-struct vbt_header {
+struct mid_vbt_header {
 	u32 signature;
 	u8 revision;
 } __packed;
 
 /* The same for r0 and r1 */
 struct vbt_r0 {
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	u8 size;
 	u8 checksum;
 } __packed;
 
 struct vbt_r10 {
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	u8 checksum;
 	u16 size;
 	u8 panel_count;
@@ -281,7 +281,7 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
 	struct drm_device *dev = dev_priv->dev;
 	u32 addr;
 	u8 __iomem *vbt_virtual;
-	struct vbt_header vbt_header;
+	struct mid_vbt_header vbt_header;
 	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
 	int ret = -1;
 
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2eb3dc4e9c9b..69e51e903f35 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -252,7 +252,6 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
-		connector->display_info.raw_edid = NULL;
 	}
 
 	/*
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index c430bd424681..ad0d6de938f3 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -166,8 +166,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 
 	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
 		int max = bd->props.max_brightness;
-		bd->props.brightness = bclp * max / 255;
-		backlight_update_status(bd);
+		gma_backlight_set(dev, bclp * max / 255);
 	}
 
 	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 7563cd51851a..b58c4701c4e8 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -290,6 +290,7 @@ static void psb_get_core_freq(struct drm_device *dev)
 	case 6:
 	case 7:
 		dev_priv->core_freq = 266;
+		break;
 	default:
 		dev_priv->core_freq = 0;
 	}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b15282fdbf97..a7fd6c48b793 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -24,10 +24,10 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_global.h>
-#include "gem_glue.h"
 #include <drm/gma_drm.h>
 #include "psb_reg.h"
 #include "psb_intel_drv.h"
+#include "intel_bios.h"
 #include "gtt.h"
 #include "power.h"
 #include "opregion.h"
@@ -613,6 +613,8 @@ struct drm_psb_private {
 	 */
 	struct backlight_device *backlight_device;
 	struct drm_property *backlight_property;
+	bool backlight_enabled;
+	int backlight_level;
 	uint32_t blc_adj1;
 	uint32_t blc_adj2;
 
@@ -640,6 +642,19 @@ struct drm_psb_private {
 	int mdfld_panel_id;
 
 	bool dplla_96mhz;	/* DPLL data from the VBT */
+
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
+	uint8_t panel_type;
 };
 
 
@@ -796,6 +811,9 @@ extern int psb_fbdev_init(struct drm_device *dev);
 /* backlight.c */
 int gma_backlight_init(struct drm_device *dev);
 void gma_backlight_exit(struct drm_device *dev);
+void gma_backlight_disable(struct drm_device *dev);
+void gma_backlight_enable(struct drm_device *dev);
+void gma_backlight_set(struct drm_device *dev, int v);
 
 /* oaktrail_crtc.c */
 extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index ebe1a28f60e1..90f2d11e686b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -29,10 +29,6 @@
  * Display related stuff
  */
 
-/* store information about an Ixxx DVO */
-/* The i830->i865 use multiple DVOs with multiple i2cs */
-/* the i915, i945 have a single sDVO i2c bus - which is different */
-#define MAX_OUTPUTS 6
 /* maximum connectors per crtcs in the mode set */
 #define INTELFB_CONN_LIMIT 4
 
@@ -69,6 +65,8 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_MIPI 7
 #define INTEL_OUTPUT_MIPI2 8
+#define INTEL_OUTPUT_DISPLAYPORT 9
+#define INTEL_OUTPUT_EDP 10
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -133,6 +131,11 @@ struct psb_intel_encoder {
 	void (*hot_plug)(struct psb_intel_encoder *);
 	int crtc_mask;
 	int clone_mask;
+	u32 ddi_select;	/* Channel info */
+#define DDI0_SELECT	0x01
+#define DDI1_SELECT	0x02
+#define DP_MASK		0x8000
+#define DDI_MASK	0x03
 	void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
 
 	/* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's
@@ -190,7 +193,6 @@ struct psb_intel_crtc {
 	u32 mode_flags;
 
 	bool active;
-	bool crtc_enable;
 
 	/* Saved Crtc HW states */
 	struct psb_intel_crtc_state *crtc_state;
@@ -285,4 +287,20 @@ extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
 extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
 extern void gma_intel_teardown_gmbus(struct drm_device *dev);
 
+/* DP support */
+extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg);
+extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode);
+
+extern void psb_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val);
+extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val);
+extern void cdv_sb_reset(struct drm_device *dev);
+
+extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
 #endif				/* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 37adc9edf974..2a4c3a9e33e3 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -630,17 +630,8 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
 							property,
 							value))
 			goto set_prop_error;
-		else {
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-			struct drm_psb_private *devp =
-						encoder->dev->dev_private;
-			struct backlight_device *bd = devp->backlight_device;
-			if (bd) {
-				bd->props.brightness = value;
-				backlight_update_status(bd);
-			}
-#endif
-		}
+		else
+			gma_backlight_set(encoder->dev, value);
 	} else if (!strcmp(property->name, "DPMS")) {
 		struct drm_encoder_helper_funcs *hfuncs
 						= encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 8e8c8efb0a89..d914719c4b60 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -173,15 +173,46 @@
 #define PP_SEQUENCE_ON			(1 << 28)
 #define PP_SEQUENCE_OFF			(2 << 28)
 #define PP_SEQUENCE_MASK		0x30000000
+#define	PP_CYCLE_DELAY_ACTIVE		(1 << 27)
+#define	PP_SEQUENCE_STATE_ON_IDLE	(1 << 3)
+#define	PP_SEQUENCE_STATE_MASK		0x0000000f
+
 #define PP_CONTROL		0x61204
 #define POWER_TARGET_ON			(1 << 0)
-
+#define	PANEL_UNLOCK_REGS		(0xabcd << 16)
+#define	PANEL_UNLOCK_MASK		(0xffff << 16)
+#define	EDP_FORCE_VDD			(1 << 3)
+#define	EDP_BLC_ENABLE			(1 << 2)
+#define	PANEL_POWER_RESET		(1 << 1)
+#define	PANEL_POWER_OFF			(0 << 0)
+#define	PANEL_POWER_ON			(1 << 0)
+
+/* Poulsbo/Oaktrail */
 #define LVDSPP_ON		0x61208
 #define LVDSPP_OFF		0x6120c
 #define PP_CYCLE		0x61210
 
+/* Cedartrail */
 #define PP_ON_DELAYS		0x61208		/* Cedartrail */
+#define PANEL_PORT_SELECT_MASK 		(3 << 30)
+#define PANEL_PORT_SELECT_LVDS 		(0 << 30)
+#define PANEL_PORT_SELECT_EDP		(1 << 30)
+#define PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT	16
+#define PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT	0
+
 #define PP_OFF_DELAYS		0x6120c		/* Cedartrail */
+#define PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT	16
+#define PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT	0
+
+#define PP_DIVISOR		0x61210		/* Cedartrail */
+#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
+#define  PP_REFERENCE_DIVIDER_SHIFT	8
+#define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
 #define PFIT_CONTROL		0x61230
 #define PFIT_ENABLE			(1 << 31)
@@ -1282,6 +1313,10 @@ No status bits are changed.
 # define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* Fixed value on CDV */
 # define DPOUNIT_CLOCK_GATE_DISABLE		(1 << 11)
 # define DPIOUNIT_CLOCK_GATE_DISABLE		(1 << 6)
+# define DPUNIT_PIPEB_GATE_DISABLE		(1 << 30)
+# define DPUNIT_PIPEA_GATE_DISABLE		(1 << 25)
+# define DPCUNIT_CLOCK_GATE_DISABLE		(1 << 24)
+# define DPLSUNIT_CLOCK_GATE_DISABLE		(1 << 13)
 
 #define RAMCLK_GATE_D		0x6210
 
@@ -1347,5 +1382,165 @@ No status bits are changed.
 #define LANE_PLL_ENABLE		(0x3 << 20)
 #define LANE_PLL_PIPE(p)	(((p) == 0) ? (1 << 21) : (0 << 21))
 
+#define DP_B				0x64100
+#define DP_C				0x64200
+
+#define   DP_PORT_EN			(1 << 31)
+#define   DP_PIPEB_SELECT		(1 << 30)
+#define   DP_PIPE_MASK			(1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define   DP_LINK_TRAIN_PAT_1		(0 << 28)
+#define   DP_LINK_TRAIN_PAT_2		(1 << 28)
+#define   DP_LINK_TRAIN_PAT_IDLE	(2 << 28)
+#define   DP_LINK_TRAIN_OFF		(3 << 28)
+#define   DP_LINK_TRAIN_MASK		(3 << 28)
+#define   DP_LINK_TRAIN_SHIFT		28
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define   DP_VOLTAGE_0_4		(0 << 25)
+#define   DP_VOLTAGE_0_6		(1 << 25)
+#define   DP_VOLTAGE_0_8		(2 << 25)
+#define   DP_VOLTAGE_1_2		(3 << 25)
+#define   DP_VOLTAGE_MASK		(7 << 25)
+#define   DP_VOLTAGE_SHIFT		25
+
+/* Signal pre-emphasis levels, like voltages, the other end tells us what
+ * they want
+ */
+#define   DP_PRE_EMPHASIS_0		(0 << 22)
+#define   DP_PRE_EMPHASIS_3_5		(1 << 22)
+#define   DP_PRE_EMPHASIS_6		(2 << 22)
+#define   DP_PRE_EMPHASIS_9_5		(3 << 22)
+#define   DP_PRE_EMPHASIS_MASK		(7 << 22)
+#define   DP_PRE_EMPHASIS_SHIFT		22
+
+/* How many wires to use. I guess 3 was too hard */
+#define   DP_PORT_WIDTH_1		(0 << 19)
+#define   DP_PORT_WIDTH_2		(1 << 19)
+#define   DP_PORT_WIDTH_4		(3 << 19)
+#define   DP_PORT_WIDTH_MASK		(7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define   DP_ENHANCED_FRAMING		(1 << 18)
+
+/** locked once port is enabled */
+#define   DP_PORT_REVERSAL		(1 << 15)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
+
+#define   DP_SCRAMBLING_DISABLE		(1 << 12)
+#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define   DP_COLOR_RANGE_16_235		(1 << 8)
+
+/** Turn on the audio link */
+#define   DP_AUDIO_OUTPUT_ENABLE	(1 << 6)
+
+/** vs and hs sync polarity */
+#define   DP_SYNC_VS_HIGH		(1 << 4)
+#define   DP_SYNC_HS_HIGH		(1 << 3)
+
+/** A fantasy */
+#define   DP_DETECTED			(1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPB_AUX_CH_CTL			0x64110
+#define DPB_AUX_CH_DATA1		0x64114
+#define DPB_AUX_CH_DATA2		0x64118
+#define DPB_AUX_CH_DATA3		0x6411c
+#define DPB_AUX_CH_DATA4		0x64120
+#define DPB_AUX_CH_DATA5		0x64124
+
+#define DPC_AUX_CH_CTL			0x64210
+#define DPC_AUX_CH_DATA1		0x64214
+#define DPC_AUX_CH_DATA2		0x64218
+#define DPC_AUX_CH_DATA3		0x6421c
+#define DPC_AUX_CH_DATA4		0x64220
+#define DPC_AUX_CH_DATA5		0x64224
+
+#define   DP_AUX_CH_CTL_SEND_BUSY	    (1 << 31)
+#define   DP_AUX_CH_CTL_DONE		    (1 << 30)
+#define   DP_AUX_CH_CTL_INTERRUPT	    (1 << 29)
+#define   DP_AUX_CH_CTL_TIME_OUT_ERROR	    (1 << 28)
+#define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26)
+#define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 25)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT   20
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_MASK   (0xf << 16)
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT  16
+#define   DP_AUX_CH_CTL_AUX_AKSV_SELECT	    (1 << 15)
+#define   DP_AUX_CH_CTL_MANCHESTER_TEST	    (1 << 14)
+#define   DP_AUX_CH_CTL_SYNC_TEST	    (1 << 13)
+#define   DP_AUX_CH_CTL_DEGLITCH_TEST	    (1 << 12)
+#define   DP_AUX_CH_CTL_PRECHARGE_TEST	    (1 << 11)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+
+#define _PIPEA_GMCH_DATA_M			0x70050
+#define _PIPEB_GMCH_DATA_M			0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define   PIPE_GMCH_DATA_M_TU_SIZE_MASK		(0x3f << 25)
+#define   PIPE_GMCH_DATA_M_TU_SIZE_SHIFT	25
+
+#define   PIPE_GMCH_DATA_M_MASK			(0xffffff)
+
+#define _PIPEA_GMCH_DATA_N			0x70054
+#define _PIPEB_GMCH_DATA_N			0x71054
+#define   PIPE_GMCH_DATA_N_MASK			(0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
+#define _PIPEA_DP_LINK_M				0x70060
+#define _PIPEB_DP_LINK_M				0x71060
+#define   PIPEA_DP_LINK_M_MASK			(0xffffff)
+
+#define _PIPEA_DP_LINK_N				0x70064
+#define _PIPEB_DP_LINK_N				0x71064
+#define   PIPEA_DP_LINK_N_MASK			(0xffffff)
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
+#define   PIPE_BPC_MASK				(7 << 5)
+#define   PIPE_8BPC				(0 << 5)
+#define   PIPE_10BPC				(1 << 5)
+#define   PIPE_6BPC				(2 << 5)
 
 #endif
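
The GMCH data M/N and link M/N comment blocks above describe two ratios that get
programmed into the PIPE_GMCH_DATA_* and PIPE_DP_LINK_* registers. The standalone
sketch below (not part of the patch) works the arithmetic for an assumed 1080p60 mode
over four lanes at the 2.7 GHz link rate; the example clocks are illustrative. The
driver will also need to reduce the ratio to fit the 24-bit M/N fields
(PIPE_GMCH_DATA_M_MASK is 0xffffff), which this sketch does not attempt.

#include <stdio.h>

int main(void)
{
	unsigned long long pixel_clock = 148500;	/* kHz, example 1080p60 dotclock */
	unsigned long long link_clock = 270000;		/* kHz, 2.7 GHz link rate */
	unsigned long long bytes_per_pixel = 3;		/* after the LUTs, always 3 here */
	unsigned long long lane_count = 4;

	/* GMCH data M/N = dot clock * bytes per pixel / (ls_clk * lane count) */
	unsigned long long data_m = pixel_clock * bytes_per_pixel;
	unsigned long long data_n = link_clock * lane_count;

	/* Link M/N = pixel_clock / ls_clk */
	unsigned long long link_m = pixel_clock;
	unsigned long long link_n = link_clock;

	printf("data M/N = %llu/%llu (~%.3f)\n", data_m, data_n,
	       (double)data_m / (double)data_n);
	printf("link M/N = %llu/%llu (~%.3f)\n", link_m, link_n,
	       (double)link_m / (double)link_n);
	return 0;
}
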
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index c148d92229fd..fc9292705dbf 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1291,7 +1291,6 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
 
 	return drm_get_edid(connector,
 			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
-	return NULL;
 }
 
 static enum drm_connector_status
@@ -1342,7 +1341,6 @@ psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 			}
 		} else
 			status = connector_status_disconnected;
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -1403,7 +1401,6 @@ psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
 				ret = connector_status_disconnected;
 			else
 				ret = connector_status_connected;
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		} else
 			ret = connector_status_connected;
@@ -1452,7 +1449,6 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 			drm_add_edid_modes(connector, edid);
 		}
 
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 }
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 36d952280c50..599099fe76e3 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -427,15 +427,10 @@ static int ch7006_remove(struct i2c_client *client)
 	return 0;
 }
 
-static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg)
+static int ch7006_resume(struct device *dev)
 {
-	ch7006_dbg(client, "\n");
-
-	return 0;
-}
+	struct i2c_client *client = to_i2c_client(dev);
 
-static int ch7006_resume(struct i2c_client *client)
-{
 	ch7006_dbg(client, "\n");
 
 	ch7006_write(client, 0x3d, 0x0);
@@ -499,15 +494,18 @@ static struct i2c_device_id ch7006_ids[] = {
 };
 MODULE_DEVICE_TABLE(i2c, ch7006_ids);
 
+static const struct dev_pm_ops ch7006_pm_ops = {
+	.resume = ch7006_resume,
+};
+
 static struct drm_i2c_encoder_driver ch7006_driver = {
 	.i2c_driver = {
 		.probe = ch7006_probe,
 		.remove = ch7006_remove,
-		.suspend = ch7006_suspend,
-		.resume = ch7006_resume,
 
 		.driver = {
 			.name = "ch7006",
+			.pm = &ch7006_pm_ops,
 		},
 
 		.id_table = ch7006_ids,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdba6d7e..0f2c5493242b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
 	  dvo_sil164.o \
+	  dvo_ns2501.o \
 	  i915_gem_dmabuf.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 573de82c9f5a..33a62ad80100 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -57,13 +57,12 @@ struct intel_dvo_dev_ops {
 	void (*create_resources)(struct intel_dvo_device *dvo);
 
 	/*
-	 * Turn on/off output or set intermediate power levels if available.
+	 * Turn on/off output.
 	 *
-	 * Unsupported intermediate modes drop to the lower power setting.
-	 * If the  mode is DPMSModeOff, the output must be disabled,
-	 * as the DPLL may be disabled afterwards.
+	 * Because none of our dvo drivers support intermediate power levels,
+	 * we don't expose this in the interface.
 	 */
-	void (*dpms)(struct intel_dvo_device *dvo, int mode);
+	void (*dpms)(struct intel_dvo_device *dvo, bool enable);
 
 	/*
 	 * Callback for testing a video mode for a given output.
@@ -114,6 +113,12 @@ struct intel_dvo_dev_ops {
 	 */
 	enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
 
+	/*
+	 * Probe the current hw status, returning true if the connected output
+	 * is active.
+	 */
+	bool (*get_hw_state)(struct intel_dvo_device *dev);
+
 	/**
 	 * Query the device for the modes it provides.
 	 *
@@ -139,5 +144,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
 extern struct intel_dvo_dev_ops ivch_ops;
 extern struct intel_dvo_dev_ops tfp410_ops;
 extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
 
 #endif /* _INTEL_DVO_H */
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a1e1fc..86b27d1d90c2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@ struct ch7017_priv {
 };
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
 
 static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
 {
@@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 	lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
 			  (mode->hdisplay & 0x0700) >> 8;
 
-	ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+	ch7017_dpms(dvo, false);
 	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
 			horizontal_active_pixel_input);
 	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 }
 
 /* set the CH7017 power state */
-static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	uint8_t val;
 
@@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
 			CH7017_DAC3_POWER_DOWN |
 			CH7017_TV_POWER_DOWN_EN);
 
-	if (mode == DRM_MODE_DPMS_ON) {
+	if (enable) {
 		/* Turn on the LVDS */
 		ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
 			     val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -359,6 +359,18 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
 	msleep(20);
 }
 
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+	if (val & CH7017_LVDS_POWER_DOWN_EN)
+		return false;
+	else
+		return true;
+}
+
 static void ch7017_dump_regs(struct intel_dvo_device *dvo)
 {
 	uint8_t val;
@@ -396,6 +408,7 @@ struct intel_dvo_dev_ops ch7017_ops = {
 	.mode_valid = ch7017_mode_valid,
 	.mode_set = ch7017_mode_set,
 	.dpms = ch7017_dpms,
+	.get_hw_state = ch7017_get_hw_state,
 	.dump_regs = ch7017_dump_regs,
 	.destroy = ch7017_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a036600e806..38f3a6cb8c7d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -289,14 +289,26 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
 }
 
 /* set the CH7xxx power state */
-static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
 {
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
 	else
 		ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
 }
 
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+	u8 val;
+
+	ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+	if (val & CH7xxx_PM_FPD)
+		return false;
+	else
+		return true;
+}
+
 static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 {
 	int i;
@@ -326,6 +338,7 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
 	.mode_valid = ch7xxx_mode_valid,
 	.mode_set = ch7xxx_mode_set,
 	.dpms = ch7xxx_dpms,
+	.get_hw_state = ch7xxx_get_hw_state,
 	.dump_regs = ch7xxx_dump_regs,
 	.destroy = ch7xxx_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893d5e3c..baaf65bf0bdd 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
 }
 
 /** Sets the power state of the panel connected to the ivch */
-static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	int i;
 	uint16_t vr01, vr30, backlight;
@@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
 	if (!ivch_read(dvo, VR01, &vr01))
 		return;
 
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		backlight = 1;
 	else
 		backlight = 0;
 	ivch_write(dvo, VR80, backlight);
 
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
 	else
 		vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
 		if (!ivch_read(dvo, VR30, &vr30))
 			break;
 
-		if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
+		if (((vr30 & VR30_PANEL_ON) != 0) == enable)
 			break;
 		udelay(1000);
 	}
@@ -323,6 +323,20 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
 	udelay(16 * 1000);
 }
 
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint16_t vr01;
+
+	/* Read back the current state of the panel. */
+	if (!ivch_read(dvo, VR01, &vr01))
+		return false;
+
+	if (vr01 & VR01_LCD_ENABLE)
+		return true;
+	else
+		return false;
+}
+
 static void ivch_mode_set(struct intel_dvo_device *dvo,
 			  struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
@@ -413,6 +427,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
 struct intel_dvo_dev_ops ivch_ops = {
 	.init = ivch_init,
 	.dpms = ivch_dpms,
+	.get_hw_state = ivch_get_hw_state,
 	.mode_valid = ivch_mode_valid,
 	.mode_set = ivch_mode_set,
 	.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 000000000000..c4a255be6979
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,588 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+	/* I2CDevRec d; */
+	bool quiet;
+	int reg_8_shadow;
+	int reg_8_set;
+	/* Shadow registers for i915 */
+	int dvoc;
+	int pll_a;
+	int srcdim;
+	int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
+ * laptops does not react on the i2c bus unless
+ * both the PLL is running and the display is configured in its native
+ * resolution.
+ * This function forces the DVO on, and stores the registers it touches.
+ * Afterwards, registers are restored to regular values.
+ *
+ * This is pretty much a hack, though it works.
+ * Without that, ns2501_readb and ns2501_writeb fail
+ * when switching the resolution.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+	ns->dvoc = I915_READ(DVO_C);
+	ns->pll_a = I915_READ(_DPLL_A);
+	ns->srcdim = I915_READ(DVOC_SRCDIM);
+	ns->fw_blc = I915_READ(FW_BLC);
+
+	I915_WRITE(DVOC, 0x10004084);
+	I915_WRITE(_DPLL_A, 0xd0820000);
+	I915_WRITE(DVOC_SRCDIM, 0x400300);	/* 1024x768 */
+	I915_WRITE(FW_BLC, 0x1080304);
+
+	I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	I915_WRITE(DVOC, ns->dvoc);
+	I915_WRITE(_DPLL_A, ns->pll_a);
+	I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+	I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+** Read a register from the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = dvo->slave_addr,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = out_buf,
+		 },
+		{
+		 .addr = dvo->slave_addr,
+		 .flags = I2C_M_RD,
+		 .len = 1,
+		 .buf = in_buf,
+		 }
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS
+		    ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+		     adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/*
+** Write a register to the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1) {
+		return true;
+	}
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+			      addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/* National Semiconductor 2501 driver for a chip on the i2c bus:
+ * scan the bus for the chip.
+ * Hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, it will not be seen, and hence not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the NS2501 chip on the specified i2c bus */
+	struct ns2501_priv *ns;
+	unsigned char ch;
+
+	ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+	if (ns == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = ns;
+	ns->quiet = true;
+
+	if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_VID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_DID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	ns->quiet = false;
+	ns->reg_8_set = 0;
+	ns->reg_8_shadow =
+	    NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+	DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+	return true;
+
+out:
+	kfree(ns);
+	return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+	/*
+	 * This is a laptop display; it doesn't have hotplugging.
+	 * Even if it did, the detection bit of the 2501 is unreliable as
+	 * it only works for some display types.
+	 * It is even more unreliable because the PLL must be active to
+	 * allow reading from the chip.
+	 */
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS
+	    ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+	     mode->vtotal);
+
+	/*
+	 * Currently, these are all the modes I have data from.
+	 * More might exist. Unclear how to find the native resolution
+	 * of the panel in here so we could always accept it
+	 * by disabling the scaler.
+	 */
+	if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+	    (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+	    (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+		return MODE_OK;
+	} else {
+		return MODE_ONE_SIZE;	/* Is this a reasonable error? */
+	}
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	bool ok;
+	bool restore = false;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	DRM_DEBUG_KMS
+	    ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+	     mode->vtotal);
+
+	/*
+	 * Where do I find the native resolution for which scaling is not required???
+	 *
+	 * First trigger the DVO on as otherwise the chip does not appear on the i2c
+	 * bus.
+	 */
+	do {
+		ok = true;
+
+		if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+			/* mode 277 */
+			ns->reg_8_shadow &= ~NS2501_8_BPAS;
+			DRM_DEBUG_KMS("%s: switching to 800x600\n",
+				      __FUNCTION__);
+
+			/*
+			 * No, I do not know where this data comes from.
+			 * It is just what the video bios left in the DVO, so
+			 * I'm just copying it here over.
+			 * This also means that I cannot support any other modes
+			 * except the ones supported by the bios.
+			 */
+			ok &= ns2501_writeb(dvo, 0x11, 0xc8);	/* 0xc7 also works */
+			ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+			ok &= ns2501_writeb(dvo, 0x1c, 0x62);	/* VBIOS left 0x64 here, but 0x62 works nicer */
+			ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+			ok &= ns2501_writeb(dvo, 0x34, 0x03);
+			ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+			ok &= ns2501_writeb(dvo, 0x80, 0x27);
+			ok &= ns2501_writeb(dvo, 0x81, 0x03);
+			ok &= ns2501_writeb(dvo, 0x82, 0x41);
+			ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+			ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+			ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x90, 0xfe);	/* vertical. VBIOS left 0xff here, but 0xfe works better */
+			ok &= ns2501_writeb(dvo, 0x91, 0x07);
+			ok &= ns2501_writeb(dvo, 0x94, 0x00);
+			ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x99, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+			ok &= ns2501_writeb(dvo, 0x9c, 0x23);	/* Looks like first and last line of the image. */
+			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+			ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+			ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0xb9, 0xc8);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */
+
+			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+			ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+			ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+		} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+			/* mode 274 */
+			DRM_DEBUG_KMS("%s: switching to 640x480\n",
+				      __FUNCTION__);
+			/*
+			 * No, I do not know where this data comes from.
+			 * It is just what the video bios left in the DVO, so
+			 * I'm just copying it here over.
+			 * This also means that I cannot support any other modes
+			 * except the ones supported by the bios.
+			 */
+			ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+			ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+			ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+			ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+			ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0x34, 0x03);
+			ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+			ok &= ns2501_writeb(dvo, 0x80, 0xff);
+			ok &= ns2501_writeb(dvo, 0x81, 0x07);
+			ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+			ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+			ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+			ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x90, 0xff);	/* vertical */
+			ok &= ns2501_writeb(dvo, 0x91, 0x07);
+			ok &= ns2501_writeb(dvo, 0x94, 0x00);
+			ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x99, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+			ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+			ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+			ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+			ok &= ns2501_writeb(dvo, 0xb9, 0xa0);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */
+
+			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+			ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+			ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+		} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+			/* mode 280 */
+			DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+				      __FUNCTION__);
+			/*
+			 * This might or might not work, actually. I'm silently
+			 * assuming here that the native panel resolution is
+			 * 1024x768. If not, this leaves the scaler disabled,
+			 * generating a picture that is likely not the expected
+			 * one.
+			 *
+			 * The problem is that I do not know where to take the
+			 * panel dimensions from.
+			 *
+			 * Enable the bypass; scaling is not required.
+			 *
+			 * The scaler registers are irrelevant here.
+			 *
+			 */
+			ns->reg_8_shadow |= NS2501_8_BPAS;
+			ok &= ns2501_writeb(dvo, 0x37, 0x44);
+		} else {
+			/*
+			 * Data not known. Bummer!
+			 * Hopefully the code should never get here, as
+			 * ns2501_mode_valid reported no other modes.
+			 */
+			ns->reg_8_shadow |= NS2501_8_BPAS;
+		}
+		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+		if (!ok) {
+			if (restore)
+				restore_dvo(dvo);
+			enable_dvo(dvo);
+			restore = true;
+		}
+	} while (!ok);
+	/*
+	 * Restore the old i915 registers before
+	 * forcing the ns2501 on.
+	 */
+	if (restore)
+		restore_dvo(dvo);
+}
+
+/* read the NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+	unsigned char ch;
+
+	if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+		return false;
+
+	if (ch & NS2501_8_PD)
+		return true;
+	else
+		return false;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	bool ok;
+	bool restore = false;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	unsigned char ch;
+
+	DRM_DEBUG_KMS("%s: Trying to set the DPMS state of the DVO to %i\n",
+		      __FUNCTION__, enable);
+
+	ch = ns->reg_8_shadow;
+
+	if (enable)
+		ch |= NS2501_8_PD;
+	else
+		ch &= ~NS2501_8_PD;
+
+	if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+		ns->reg_8_set = 1;
+		ns->reg_8_shadow = ch;
+
+		do {
+			ok = true;
+			ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+			ok &= ns2501_writeb(dvo, 0x34,
+					    enable ? 0x03 : 0x00);
+			ok &= ns2501_writeb(dvo, 0x35,
+					    enable ? 0xff : 0x00);
+			if (!ok) {
+				if (restore)
+					restore_dvo(dvo);
+				enable_dvo(dvo);
+				restore = true;
+			}
+		} while (!ok);
+
+		if (restore)
+			restore_dvo(dvo);
+	}
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+	DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+	DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REG8, &val);
+	DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REG9, &val);
+	DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REGC, &val);
+	DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+
+	if (ns) {
+		kfree(ns);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+	.init = ns2501_init,
+	.detect = ns2501_detect,
+	.mode_valid = ns2501_mode_valid,
+	.mode_set = ns2501_mode_set,
+	.dpms = ns2501_dpms,
+	.get_hw_state = ns2501_get_hw_state,
+	.dump_regs = ns2501_dump_regs,
+	.destroy = ns2501_destroy,
+};
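
The mode_set and dpms paths above share one idiom: every ns2501_writeb() result is
folded into "ok" with "&=", and on any failure the output is forced on (enable_dvo)
so the bus becomes responsive, after which the whole register block is retried and
the saved i915 state is restored at the end. A minimal standalone sketch of that
retry-with-restore loop, with hypothetical helpers standing in for the driver
functions:

/*
 * Hedged sketch (plain userspace C). write_reg, force_output_on and
 * restore_output are made-up stand-ins, not driver functions; the bus is
 * simulated as only working from the second attempt on.
 */
#include <stdbool.h>
#include <stdio.h>

static int attempts;

static bool write_reg(int reg, int val)
{
	(void)reg; (void)val;
	return attempts > 0;	/* fail once, then succeed */
}

static void force_output_on(void) { printf("output forced on\n"); }
static void restore_output(void)  { printf("old state restored\n"); }

int main(void)
{
	bool ok, restore = false;

	do {
		ok = true;
		ok &= write_reg(0x34, 0x03);
		ok &= write_reg(0x35, 0xff);
		if (!ok) {		/* bus not ready: poke hw, then retry the block */
			if (restore)
				restore_output();
			force_output_on();
			restore = true;
			attempts++;
		}
	} while (!ok);

	if (restore)
		restore_output();
	return 0;
}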
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6f619d..4debd32e3e4c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
 }
 
 /* set the SIL164 power state */
-static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	int ret;
 	unsigned char ch;
@@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
 	if (ret == false)
 		return;
 
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		ch |= SIL164_8_PD;
 	else
 		ch &= ~SIL164_8_PD;
@@ -226,6 +226,21 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
 	return;
 }
 
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+	int ret;
+	unsigned char ch;
+
+	ret = sil164_readb(dvo, SIL164_REG8, &ch);
+	if (ret == false)
+		return false;
+
+	if (ch & SIL164_8_PD)
+		return true;
+	else
+		return false;
+}
+
 static void sil164_dump_regs(struct intel_dvo_device *dvo)
 {
 	uint8_t val;
@@ -258,6 +273,7 @@ struct intel_dvo_dev_ops sil164_ops = {
 	.mode_valid = sil164_mode_valid,
 	.mode_set = sil164_mode_set,
 	.dpms = sil164_dpms,
+	.get_hw_state = sil164_get_hw_state,
 	.dump_regs = sil164_dump_regs,
 	.destroy = sil164_destroy,
 };
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3ec54aa..e17f1b07e915 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
 }
 
 /* set the tfp410 power state */
-static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	uint8_t ctl1;
 
 	if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
 		return;
 
-	if (mode == DRM_MODE_DPMS_ON)
+	if (enable)
 		ctl1 |= TFP410_CTL_1_PD;
 	else
 		ctl1 &= ~TFP410_CTL_1_PD;
@@ -249,6 +249,19 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
 	tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
 }
 
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint8_t ctl1;
+
+	if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+		return false;
+
+	if (ctl1 & TFP410_CTL_1_PD)
+		return true;
+	else
+		return false;
+}
+
 static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 {
 	uint8_t val, val2;
@@ -299,6 +312,7 @@ struct intel_dvo_dev_ops tfp410_ops = {
 	.mode_valid = tfp410_mode_valid,
 	.mode_set = tfp410_mode_set,
 	.dpms = tfp410_dpms,
+	.get_hw_state = tfp410_get_hw_state,
 	.dump_regs = tfp410_dump_regs,
 	.destroy = tfp410_destroy,
 };
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 63f01e29c1fa..dde8b505bf7f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -43,7 +43,6 @@
 
 enum {
 	ACTIVE_LIST,
-	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
 };
@@ -61,28 +60,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
 
 	seq_printf(m, "gen: %d\n", info->gen);
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-	B(is_mobile);
-	B(is_i85x);
-	B(is_i915g);
-	B(is_i945gm);
-	B(is_g33);
-	B(need_gfx_hws);
-	B(is_g4x);
-	B(is_pineview);
-	B(is_broadwater);
-	B(is_crestline);
-	B(has_fbc);
-	B(has_pipe_cxsr);
-	B(has_hotplug);
-	B(cursor_needs_physical);
-	B(has_overlay);
-	B(overlay_needs_physical);
-	B(supports_tv);
-	B(has_bsd_ring);
-	B(has_blt_ring);
-	B(has_llc);
-#undef B
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+	DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
 
 	return 0;
 }
@@ -120,20 +102,23 @@ static const char *cache_level_str(int type)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain,
-		   obj->last_rendering_seqno,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
 		   obj->last_fenced_seqno,
 		   cache_level_str(obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->pin_count)
+		seq_printf(m, " (pinned x %d)", obj->pin_count);
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
@@ -176,10 +161,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
-	case FLUSHING_LIST:
-		seq_printf(m, "Flushing:\n");
-		head = &dev_priv->mm.flushing_list;
-		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
@@ -217,8 +198,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 count, mappable_count;
-	size_t size, mappable_size;
+	u32 count, mappable_count, purgeable_count;
+	size_t size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
@@ -231,13 +212,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   dev_priv->mm.object_memory);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.gtt_list, gtt_list);
+	count_objects(&dev_priv->mm.bound_list, gtt_list);
 	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.active_list, mm_list);
-	count_objects(&dev_priv->mm.flushing_list, mm_list);
 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -246,8 +226,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
+	size = count = purgeable_size = purgeable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
+		size += obj->base.size, ++count;
+		if (obj->madv == I915_MADV_DONTNEED)
+			purgeable_size += obj->base.size, ++purgeable_count;
+	}
+	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
 	size = count = mappable_size = mappable_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		if (obj->fault_mappable) {
 			size += obj->gtt_space->size;
 			++count;
@@ -256,7 +244,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 			mappable_size += obj->gtt_space->size;
 			++mappable_count;
 		}
+		if (obj->madv == I915_MADV_DONTNEED) {
+			purgeable_size += obj->base.size;
+			++purgeable_count;
+		}
 	}
+	seq_printf(m, "%u purgeable objects, %zu bytes\n",
+		   purgeable_count, purgeable_size);
 	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
 		   mappable_count, mappable_size);
 	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
@@ -285,7 +279,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 		return ret;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		if (list == PINNED_LIST && obj->pin_count == 0)
 			continue;
 
@@ -358,40 +352,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_request *gem_request;
-	int ret, count;
+	int ret, count, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
 	count = 0;
-	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
-		seq_printf(m, "Render requests:\n");
-		list_for_each_entry(gem_request,
-				    &dev_priv->ring[RCS].request_list,
-				    list) {
-			seq_printf(m, "    %d @ %d\n",
-				   gem_request->seqno,
-				   (int) (jiffies - gem_request->emitted_jiffies));
-		}
-		count++;
-	}
-	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
-		seq_printf(m, "BSD requests:\n");
-		list_for_each_entry(gem_request,
-				    &dev_priv->ring[VCS].request_list,
-				    list) {
-			seq_printf(m, "    %d @ %d\n",
-				   gem_request->seqno,
-				   (int) (jiffies - gem_request->emitted_jiffies));
-		}
-		count++;
-	}
-	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
-		seq_printf(m, "BLT requests:\n");
+	for_each_ring(ring, dev_priv, i) {
+		if (list_empty(&ring->request_list))
+			continue;
+
+		seq_printf(m, "%s requests:\n", ring->name);
 		list_for_each_entry(gem_request,
-				    &dev_priv->ring[BCS].request_list,
+				    &ring->request_list,
 				    list) {
 			seq_printf(m, "    %d @ %d\n",
 				   gem_request->seqno,
@@ -412,7 +388,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
 {
 	if (ring->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %d\n",
-			   ring->name, ring->get_seqno(ring));
+			   ring->name, ring->get_seqno(ring, false));
 	}
 }
 
@@ -421,14 +397,15 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_ring_seqno_info(m, &dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i)
+		i915_ring_seqno_info(m, ring);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -441,6 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -518,13 +496,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for_each_ring(ring, dev_priv, i) {
 		if (IS_GEN6(dev) || IS_GEN7(dev)) {
-			seq_printf(m, "Graphics Interrupt mask (%s):	%08x\n",
-				   dev_priv->ring[i].name,
-				   I915_READ_IMR(&dev_priv->ring[i]));
+			seq_printf(m,
+				   "Graphics Interrupt mask (%s):	%08x\n",
+				   ring->name, I915_READ_IMR(ring));
 		}
-		i915_ring_seqno_info(m, &dev_priv->ring[i]);
+		i915_ring_seqno_info(m, ring);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -547,7 +525,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
-		seq_printf(m, "Fenced object[%2d] = ", i);
+		seq_printf(m, "Fence %d, pin count = %d, object = ",
+			   i, dev_priv->fence_regs[i].pin_count);
 		if (obj == NULL)
 			seq_printf(m, "unused");
 		else
@@ -629,12 +608,12 @@ static void print_error_buffers(struct seq_file *m,
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
 			   err->write_domain,
-			   err->seqno,
+			   err->rseqno, err->wseqno,
 			   pin_flag(err->pinned),
 			   tiling_flag(err->tiling),
 			   dirty_flag(err->dirty),
@@ -666,10 +645,9 @@ static void i915_ring_error_state(struct seq_file *m,
 	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
 	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
 	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
-		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
+	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
 		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-	}
+
 	if (INTEL_INFO(dev)->gen >= 4)
 		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
 	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -718,11 +696,17 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
 		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
+	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+		seq_printf(m, "  INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+
 	if (INTEL_INFO(dev)->gen >= 6) {
 		seq_printf(m, "ERROR: 0x%08x\n", error->error);
 		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
+	if (INTEL_INFO(dev)->gen == 7)
+		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
 	for_each_ring(ring, dev_priv, i)
 		i915_ring_error_state(m, dev, error, i);
 
@@ -798,10 +782,14 @@ i915_error_state_write(struct file *filp,
 	struct seq_file *m = filp->private_data;
 	struct i915_error_state_file_priv *error_priv = m->private;
 	struct drm_device *dev = error_priv->dev;
+	int ret;
 
 	DRM_DEBUG_DRIVER("Resetting error state\n");
 
-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	i915_destroy_error_state(dev);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -925,7 +913,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
 		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
-						GEN6_CAGF_SHIFT) * 50);
+						GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 			   GEN6_CURICONT_MASK);
 		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -941,15 +929,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 
 		max_freq = (rp_state_cap & 0xff0000) >> 16;
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
-			   max_freq * 50);
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
-			   max_freq * 50);
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
 
 		max_freq = rp_state_cap & 0xff;
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
-			   max_freq * 50);
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
@@ -1291,7 +1279,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
 	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
 	     gpu_freq++) {
 		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1302,7 +1291,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 			continue;
 		}
 		ia_freq = I915_READ(GEN6_PCODE_DATA);
-		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+		seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -1471,8 +1460,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-	mutex_lock(&dev->struct_mutex);
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1519,9 +1512,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ring = &dev_priv->ring[i];
-
+	for_each_ring(ring, dev_priv, i) {
 		seq_printf(m, "%s\n", ring->name);
 		if (INTEL_INFO(dev)->gen == 7)
 			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
@@ -1673,7 +1664,7 @@ i915_ring_stop_write(struct file *filp,
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 0;
+	int val = 0, ret;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1688,7 +1679,10 @@ i915_ring_stop_write(struct file *filp,
 
 	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
 
-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	dev_priv->stop_rings = val;
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1712,10 +1706,18 @@ i915_max_freq_read(struct file *filp,
 	struct drm_device *dev = filp->private_data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "max freq: %d\n", dev_priv->max_delay * 50);
+		       "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1732,7 +1734,10 @@ i915_max_freq_write(struct file *filp,
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 1;
+	int val = 1, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1747,12 +1752,17 @@ i915_max_freq_write(struct file *filp,
 
 	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	dev_priv->max_delay = val / 50;
+	dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
 
-	gen6_set_rps(dev, val / 50);
+	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+	mutex_unlock(&dev->struct_mutex);
 
 	return cnt;
 }
@@ -1772,10 +1782,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
 	struct drm_device *dev = filp->private_data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "min freq: %d\n", dev_priv->min_delay * 50);
+		       "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1790,7 +1808,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 1;
+	int val = 1, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
 
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1805,12 +1826,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	dev_priv->min_delay = val / 50;
+	dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
 
-	gen6_set_rps(dev, val / 50);
+	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
+	mutex_unlock(&dev->struct_mutex);
 
 	return cnt;
 }
@@ -1833,9 +1859,15 @@ i915_cache_sharing_read(struct file *filp,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
 	u32 snpcr;
-	int len;
+	int len, ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 
@@ -1861,6 +1893,9 @@ i915_cache_sharing_write(struct file *filp,
 	u32 snpcr;
 	int val = 1;
 
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
 			return -EINVAL;
@@ -1924,16 +1959,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	gen6_gt_force_wake_get(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -1946,16 +1976,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	/*
-	 * It's bad that we can potentially hang userspace if struct_mutex gets
-	 * forever stuck.  However, if we cannot acquire this lock it means that
-	 * almost certainly the driver has hung, is not unload-able. Therefore
-	 * hanging here is probably a minor inconvenience not to be seen my
-	 * almost every user.
-	 */
-	mutex_lock(&dev->struct_mutex);
 	gen6_gt_force_wake_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -2005,7 +2026,6 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
@@ -2066,6 +2086,7 @@ int i915_debugfs_init(struct drm_minor *minor)
 				  &i915_cache_sharing_fops);
 	if (ret)
 		return ret;
+
 	ret = i915_debugfs_create(minor->debugfs_root, minor,
 				  "i915_ring_stop",
 				  &i915_ring_stop_fops);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 804f1c98e279..c9bfd83dde64 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -234,10 +234,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		}
 	}
 
-	dev_priv->cpp = init->cpp;
-	dev_priv->back_offset = init->back_offset;
-	dev_priv->front_offset = init->front_offset;
-	dev_priv->current_page = 0;
+	dev_priv->dri1.cpp = init->cpp;
+	dev_priv->dri1.back_offset = init->back_offset;
+	dev_priv->dri1.front_offset = init->front_offset;
+	dev_priv->dri1.current_page = 0;
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->pf_current_page = 0;
 
@@ -574,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
 			  __func__,
-			 dev_priv->current_page,
+			 dev_priv->dri1.current_page,
 			 master_priv->sarea_priv->pf_current_page);
 
 	i915_kernel_lost_context(dev);
@@ -588,12 +588,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
 	OUT_RING(0);
-	if (dev_priv->current_page == 0) {
-		OUT_RING(dev_priv->back_offset);
-		dev_priv->current_page = 1;
+	if (dev_priv->dri1.current_page == 0) {
+		OUT_RING(dev_priv->dri1.back_offset);
+		dev_priv->dri1.current_page = 1;
 	} else {
-		OUT_RING(dev_priv->front_offset);
-		dev_priv->current_page = 0;
+		OUT_RING(dev_priv->dri1.front_offset);
+		dev_priv->dri1.current_page = 0;
 	}
 	OUT_RING(0);
 
@@ -612,7 +612,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 		ADVANCE_LP_RING();
 	}
 
-	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
 	return 0;
 }
 
@@ -1008,6 +1008,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_WAIT_TIMEOUT:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_SEMAPHORES:
+		value = i915_semaphore_is_enabled(dev);
+		break;
+	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -1424,6 +1430,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	kfree(ap);
 }
 
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+	const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			 info->gen,
+			 dev_priv->dev->pdev->device,
+			 DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
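
i915_dump_device_info() reuses the DEV_INFO_FLAGS list from i915_drv.h by
redefining DEV_INFO_FLAG and DEV_INFO_SEP, so the same flag list expands either
into one debugfs line per flag or into a single printf-style argument list. A
minimal standalone sketch of that X-macro technique, with hypothetical
FLAG/SEP/FLAG_LIST names:

/* Hedged illustration only; not the driver's macros. */
#include <stdio.h>

struct caps { int has_fbc, has_llc, is_mobile; };

#define FLAG_LIST \
	FLAG(has_fbc) SEP \
	FLAG(has_llc) SEP \
	FLAG(is_mobile)

static void print_caps(const struct caps *c)
{
	/* Expansion 1: one printf statement per flag. */
#define FLAG(x) printf(#x ": %s\n", c->x ? "yes" : "no")
#define SEP ;
	FLAG_LIST;
#undef FLAG
#undef SEP

	/* Expansion 2: a single line, one "%s" argument per flag. */
#define FLAG(x) (c->x ? #x "," : "")
#define SEP ,
	printf("flags=%s%s%s\n", FLAG_LIST);
#undef FLAG
#undef SEP
}

int main(void)
{
	struct caps c = { .has_fbc = 1, .has_llc = 0, .is_mobile = 1 };
	print_caps(&c);
	return 0;
}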
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1439,7 +1460,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_device_info *info;
-	int ret = 0, mmio_bar;
+	int ret = 0, mmio_bar, mmio_size;
 	uint32_t aperture_size;
 
 	info = (struct intel_device_info *) flags;
@@ -1448,7 +1469,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1464,6 +1484,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->dev = dev;
 	dev_priv->info = info;
 
+	i915_dump_device_info(dev_priv);
+
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
 		goto free_priv;
@@ -1503,7 +1525,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
 
 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
-	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
+	/* Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT, which we want to ioremap_wc instead. Fortunately,
+	 * the register BAR remains the same size for all the earlier
+	 * generations up to Ironlake.
+	 */
+	if (info->gen < 5)
+		mmio_size = 512*1024;
+	else
+		mmio_size = 2*1024*1024;
+
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
 	if (!dev_priv->regs) {
 		DRM_ERROR("failed to map registers\n");
 		ret = -EIO;
@@ -1535,11 +1569,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 *
 	 * All tasks on the workqueue are expected to acquire the dev mutex
 	 * so there is no point in running more than one instance of the
-	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
+	 * workqueue at any time.  Use an ordered one.
 	 */
-	dev_priv->wq = alloc_workqueue("i915",
-				       WQ_UNBOUND | WQ_NON_REENTRANT,
-				       1);
+	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
@@ -1585,7 +1617,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->rps.lock);
 	spin_lock_init(&dev_priv->dpio_lock);
 
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
@@ -1835,6 +1867,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1857,6 +1891,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f6825324e72d..aac4e5e1a5b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -469,6 +469,9 @@ static int i915_drm_freeze(struct drm_device *dev)
 				"GEM idle failed, resume might fail\n");
 			return error;
 		}
+
+		intel_modeset_disable(dev);
+
 		drm_irq_uninstall(dev);
 	}
 
@@ -542,13 +545,9 @@ static int i915_drm_thaw(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 
 		intel_modeset_init_hw(dev);
+		intel_modeset_setup_hw_state(dev);
 		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
-
-		/* Resume the modeset for every activated CRTC */
-		mutex_lock(&dev->mode_config.mutex);
-		drm_helper_resume_force_mode(dev);
-		mutex_unlock(&dev->mode_config.mutex);
 	}
 
 	intel_opregion_init(dev);
@@ -1059,7 +1058,7 @@ static bool IS_DISPLAYREG(u32 reg)
 	 * This should make it easier to transition modules over to the
 	 * new register block scheme, since we can do it incrementally.
 	 */
-	if (reg >= 0x180000)
+	if (reg >= VLV_DISPLAY_BASE)
 		return false;
 
 	if (reg >= RENDER_RING_BASE &&
@@ -1173,9 +1172,59 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
+	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+		DRM_ERROR("Unclaimed write to %x\n", reg); \
+		writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT);	\
+	} \
 }
 __i915_write(8, b)
 __i915_write(16, w)
 __i915_write(32, l)
 __i915_write(64, q)
 #undef __i915_write
+
+static const struct register_whitelist {
+	uint64_t offset;
+	uint32_t size;
+	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reg_read *reg = data;
+	struct register_whitelist const *entry = whitelist;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+		if (entry->offset == reg->offset &&
+		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(whitelist))
+		return -EINVAL;
+
+	switch (entry->size) {
+	case 8:
+		reg->val = I915_READ64(reg->offset);
+		break;
+	case 4:
+		reg->val = I915_READ(reg->offset);
+		break;
+	case 2:
+		reg->val = I915_READ16(reg->offset);
+		break;
+	case 1:
+		reg->val = I915_READ8(reg->offset);
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
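
The whitelist entry is matched by generation: gen N corresponds to bit N of
gen_bitmask, so the 0xF0 mask used above admits gens 4 through 7. A small
standalone sketch of that check, with hypothetical names:

/* Hedged arithmetic sketch, plain userspace C. */
#include <stdint.h>
#include <stdio.h>

static int gen_allowed(int gen, uint32_t gen_bitmask)
{
	return (1u << gen) & gen_bitmask;	/* bit N <-> gen N */
}

int main(void)
{
	uint32_t mask = 0xF0;	/* gens 4, 5, 6 and 7 */
	int gen;

	for (gen = 2; gen <= 7; gen++)
		printf("gen %d: %s\n", gen,
		       gen_allowed(gen, mask) ? "whitelisted" : "rejected");
	return 0;
}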
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 627fe35781b4..4f2831aa5fed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@ struct intel_pch_pll {
 
 #define WATCH_COHERENCY	0
 #define WATCH_LISTS	0
+#define WATCH_GTT	0
 
 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
@@ -195,9 +196,10 @@ struct drm_i915_error_state {
 	u32 cpu_ring_head[I915_NUM_RINGS];
 	u32 cpu_ring_tail[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
+	u32 err_int; /* gen7 */
 	u32 instpm[I915_NUM_RINGS];
 	u32 instps[I915_NUM_RINGS];
-	u32 instdone1;
+	u32 extra_instdone[I915_NUM_INSTDONE_REG];
 	u32 seqno[I915_NUM_RINGS];
 	u64 bbaddr;
 	u32 fault_reg[I915_NUM_RINGS];
@@ -221,7 +223,7 @@ struct drm_i915_error_state {
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 seqno;
+		u32 rseqno, wseqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
@@ -239,7 +241,6 @@ struct drm_i915_error_state {
 };
 
 struct drm_i915_display_funcs {
-	void (*dpms)(struct drm_crtc *crtc, int mode);
 	bool (*fbc_enabled)(struct drm_device *dev);
 	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
 	void (*disable_fbc)(struct drm_device *dev);
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size);
-	void (*sanitize_pm)(struct drm_device *dev);
 	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
 				 struct drm_display_mode *mode);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -256,6 +256,8 @@ struct drm_i915_display_funcs {
 			     struct drm_display_mode *adjusted_mode,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
+	void (*crtc_enable)(struct drm_crtc *crtc);
+	void (*crtc_disable)(struct drm_crtc *crtc);
 	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
 			  struct drm_crtc *crtc);
@@ -279,6 +281,32 @@ struct drm_i915_gt_funcs {
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 };
 
+#define DEV_INFO_FLAGS \
+	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_llc)
+
 struct intel_device_info {
 	u8 gen;
 	u8 is_mobile:1;
@@ -402,12 +430,6 @@ typedef struct drm_i915_private {
 
 	struct resource mch_res;
 
-	unsigned int cpp;
-	int back_offset;
-	int front_offset;
-	int current_page;
-	int page_flipping;
-
 	atomic_t irq_received;
 
 	/* protects the irq masks */
@@ -425,7 +447,6 @@ typedef struct drm_i915_private {
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
 
-	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int num_pipe;
 	int num_pch_pll;
 
@@ -434,8 +455,7 @@ typedef struct drm_i915_private {
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
 	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t last_instdone;
-	uint32_t last_instdone1;
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
 	unsigned int stop_rings;
 
@@ -666,7 +686,13 @@ typedef struct drm_i915_private {
 		struct drm_mm gtt_space;
 		/** List of all objects in gtt_space. Used to restore gtt
 		 * mappings on resume */
-		struct list_head gtt_list;
+		struct list_head bound_list;
+		/**
+		 * List of objects which are not bound to the GTT (thus
+		 * are idle and not used by the GPU) but still have
+		 * (presumably uncached) pages still attached.
+		 */
+		struct list_head unbound_list;
 
 		/** Usable portion of the GTT for GEM */
 		unsigned long gtt_start;
@@ -696,17 +722,6 @@ typedef struct drm_i915_private {
 		struct list_head active_list;
 
 		/**
-		 * List of objects which are not in the ringbuffer but which
-		 * still have a write_domain which needs to be flushed before
-		 * unbinding.
-		 *
-		 * last_rendering_seqno is 0 while an object is in this list.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
-		struct list_head flushing_list;
-
-		/**
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
 		 *
@@ -775,6 +790,12 @@ typedef struct drm_i915_private {
 	struct {
 		unsigned allow_batchbuffer : 1;
 		u32 __iomem *gfx_hws_cpu_addr;
+
+		unsigned int cpp;
+		int back_offset;
+		int front_offset;
+		int current_page;
+		int page_flipping;
 	} dri1;
 
 	/* Kernel Modesetting */
@@ -796,9 +817,6 @@ typedef struct drm_i915_private {
 	bool lvds_downclock_avail;
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
-	struct work_struct idle_work;
-	struct timer_list idle_timer;
-	bool busy;
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
@@ -807,26 +825,41 @@ typedef struct drm_i915_private {
 
 	bool mchbar_need_disable;
 
-	struct work_struct rps_work;
-	spinlock_t rps_lock;
-	u32 pm_iir;
-
-	u8 cur_delay;
-	u8 min_delay;
-	u8 max_delay;
-	u8 fmax;
-	u8 fstart;
-
-	u64 last_count1;
-	unsigned long last_time1;
-	unsigned long chipset_power;
-	u64 last_count2;
-	struct timespec last_time2;
-	unsigned long gfx_power;
-	int c_m;
-	int r_t;
-	u8 corr;
-	spinlock_t *mchdev_lock;
+	/* gen6+ rps state */
+	struct {
+		struct work_struct work;
+		u32 pm_iir;
+		/* lock - irqsave spinlock that protects the work_struct and
+		 * pm_iir. */
+		spinlock_t lock;
+
+		/* The variables below and all the RPS hw state are protected
+		 * by dev->struct_mutex. */
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+	} rps;
+
+	/* ilk-only ips/rps state. Everything in here is protected by the global
+	 * mchdev_lock in intel_pm.c */
+	struct {
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+		u8 fmax;
+		u8 fstart;
+
+		u64 last_count1;
+		unsigned long last_time1;
+		unsigned long chipset_power;
+		u64 last_count2;
+		struct timespec last_time2;
+		unsigned long gfx_power;
+		u8 corr;
+
+		int c_m;
+		int r_t;
+	} ips;
 
 	enum no_fbc_reason no_fbc_reason;
 
@@ -861,30 +894,48 @@ enum hdmi_force_audio {
 };
 
 enum i915_cache_level {
-	I915_CACHE_NONE,
+	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+ */
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+struct drm_i915_gem_object_ops {
+	/* Interface between the GEM object and its backing storage.
+	 * get_pages() is called once prior to the use of the associated set
+	 * get_pages() is called once prior to the use of the associated set
+	 * of pages (e.g. before binding them into the GTT), and put_pages() is
+	 * called after we no longer need them. As we expect there to be an
+	 * associated cost with migrating pages between the backing storage
+	 * onto the pages after they are no longer referenced by the GPU
+	 * in case they may be used again shortly (for example migrating the
+	 * pages to a different memory domain within the GTT). put_pages()
+	 * will therefore most likely be called when the object itself is
+	 * being released or under memory pressure (where we attempt to
+	 * reap pages for the shrinker).
+	 */
+	int (*get_pages)(struct drm_i915_gem_object *);
+	void (*put_pages)(struct drm_i915_gem_object *);
 };
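
The comment above describes get_pages()/put_pages() as the whole interface
between a GEM object and its backing storage, so different backing stores can
plug in different implementations. A minimal standalone sketch of such an
ops-style interface, using hypothetical names rather than the driver's types:

/* Hedged sketch, plain userspace C; malloc stands in for shmem allocation. */
#include <stdio.h>
#include <stdlib.h>

struct gem_obj;

struct gem_obj_ops {
	int  (*get_pages)(struct gem_obj *obj);
	void (*put_pages)(struct gem_obj *obj);
};

struct gem_obj {
	const struct gem_obj_ops *ops;
	void *pages;
	size_t size;
};

static int shmem_get_pages(struct gem_obj *obj)
{
	obj->pages = malloc(obj->size);
	return obj->pages ? 0 : -1;
}

static void shmem_put_pages(struct gem_obj *obj)
{
	free(obj->pages);
	obj->pages = NULL;
}

static const struct gem_obj_ops shmem_ops = {
	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
};

int main(void)
{
	struct gem_obj obj = { .ops = &shmem_ops, .pages = NULL, .size = 4096 };

	if (obj.ops->get_pages(&obj) == 0) {
		printf("pages attached\n");
		obj.ops->put_pages(&obj);
	}
	return 0;
}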
 
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
+	const struct drm_i915_gem_object_ops *ops;
+
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
 	struct list_head gtt_list;
 
-	/** This object's place on the active/flushing/inactive lists */
+	/** This object's place on the active/inactive lists */
 	struct list_head ring_list;
 	struct list_head mm_list;
-	/** This object's place on GPU write list */
-	struct list_head gpu_write_list;
 	/** This object's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
 	/**
-	 * This is set if the object is on the active or flushing lists
-	 * (has pending rendering), and is not set if it's on inactive (ready
-	 * to be unbound).
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * the inactive (ready to be unbound) list.
 	 */
 	unsigned int active:1;
 
@@ -895,12 +946,6 @@ struct drm_i915_gem_object {
 	unsigned int dirty:1;
 
 	/**
-	 * This is set if the object has been written to since the last
-	 * GPU flush.
-	 */
-	unsigned int pending_gpu_write:1;
-
-	/**
 	 * Fence register bits (if any) for this object.  Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
@@ -961,17 +1006,12 @@ struct drm_i915_gem_object {
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
 	unsigned int has_global_gtt_mapping:1;
+	unsigned int has_dma_mapping:1;
 
-	struct page **pages;
-
-	/**
-	 * DMAR support
-	 */
-	struct scatterlist *sg_list;
-	int num_sg;
+	struct sg_table *pages;
+	int pages_pin_count;
 
 	/* prime dma-buf support */
-	struct sg_table *sg_table;
 	void *dma_buf_vmapping;
 	int vmapping_count;
 
@@ -992,7 +1032,8 @@ struct drm_i915_gem_object {
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	uint32_t last_fenced_seqno;
 
@@ -1135,6 +1176,10 @@ struct drm_i915_file_private {
 
 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
 
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+
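
GT_FREQUENCY_MULTIPLIER captures that the rps min/max/cur delay values are kept
in 50 MHz units; the debugfs and sysfs paths above multiply on output and divide
on input. A quick standalone arithmetic sketch:

/* Hedged sketch, plain userspace C; the delay value is an example only. */
#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50

int main(void)
{
	int max_delay = 22;			/* example hw value */
	int mhz = max_delay * GT_FREQUENCY_MULTIPLIER;

	printf("max freq: %d MHz\n", mhz);	/* 1100 MHz */
	printf("delay for 900 MHz: %d\n", 900 / GT_FREQUENCY_MULTIPLIER);
	return 0;
}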
 #include "i915_trace.h"
 
 /**
@@ -1256,6 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,24 +1323,42 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-				     uint32_t invalidate_domains,
-				     uint32_t flush_domains);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
-				     bool map_and_fenceable);
+				     bool map_and_fenceable,
+				     bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
-int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-				  gfp_t gfpmask);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+	struct scatterlist *sg = obj->pages->sgl;
+	while (n >= SG_MAX_SINGLE_ALLOC) {
+		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+		n -= SG_MAX_SINGLE_ALLOC - 1;
+	}
+	return sg_page(sg+n);
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages == NULL);
+	obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages_pin_count == 0);
+	obj->pages_pin_count--;
+}
+
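
The two inline helpers above implement a simple pin count on the backing pages:
while pages_pin_count is non-zero the pages may not be reaped, e.g. while a
caller uses them with the lock dropped. A small standalone sketch of that
discipline, with hypothetical names:

/* Hedged sketch, plain userspace C. */
#include <assert.h>
#include <stdio.h>

struct obj {
	int pages_present;
	int pages_pin_count;
};

static int get_pages(struct obj *o)    { o->pages_present = 1; return 0; }
static void pin_pages(struct obj *o)   { assert(o->pages_present); o->pages_pin_count++; }
static void unpin_pages(struct obj *o) { assert(o->pages_pin_count > 0); o->pages_pin_count--; }

static int can_reap(const struct obj *o)
{
	return o->pages_present && o->pages_pin_count == 0;
}

int main(void)
{
	struct obj o = { 0, 0 };

	get_pages(&o);
	pin_pages(&o);			/* e.g. around a slow user copy */
	printf("reapable while pinned? %d\n", can_reap(&o));	/* 0 */
	unpin_pages(&o);
	printf("reapable after unpin?  %d\n", can_reap(&o));	/* 1 */
	return 0;
}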
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
@@ -1358,9 +1425,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
-				  struct drm_file *file,
-				  struct drm_i915_gem_request *request);
+int i915_add_request(struct intel_ring_buffer *ring,
+		     struct drm_file *file,
+		     struct drm_i915_gem_request *request);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,8 +1496,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
-					  unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+					  unsigned alignment,
+					  unsigned cache_level,
+					  bool mappable,
+					  bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
@@ -1519,6 +1589,7 @@ extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1529,6 +1600,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
 
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file);
 
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e2c93f7be8ed..e957f3740f68 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -36,12 +36,12 @@
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
-static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 						    unsigned alignment,
-						    bool map_and_fenceable);
+						    bool map_and_fenceable,
+						    bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
@@ -55,6 +55,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
 				    struct shrink_control *sc);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -140,7 +142,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return !obj->active;
+	return obj->gtt_space && !obj->active;
 }
 
 int
@@ -179,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
 		if (obj->pin_count)
 			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
@@ -340,7 +342,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
 				      page_length);
 	kunmap_atomic(vaddr);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 static void
@@ -391,7 +393,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 				     page_length);
 	kunmap(page);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 static int
@@ -400,7 +402,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		     struct drm_i915_gem_pread *args,
 		     struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	char __user *user_data;
 	ssize_t remain;
 	loff_t offset;
@@ -409,7 +410,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
-	int release_page;
+	struct scatterlist *sg;
+	int i;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -423,16 +425,30 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		 * anyway again before the next pread happens. */
 		if (obj->cache_level == I915_CACHE_NONE)
 			needs_clflush = 1;
-		ret = i915_gem_object_set_to_gtt_domain(obj, false);
-		if (ret)
-			return ret;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, false);
+			if (ret)
+				return ret;
+		}
 	}
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	offset = args->offset;
 
-	while (remain > 0) {
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 
+		if (i < offset >> PAGE_SHIFT)
+			continue;
+
+		if (remain <= 0)
+			break;
+
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
@@ -443,18 +459,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		if (obj->pages) {
-			page = obj->pages[offset >> PAGE_SHIFT];
-			release_page = 0;
-		} else {
-			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto out;
-			}
-			release_page = 1;
-		}
-
+		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -465,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 			goto next_page;
 
 		hit_slowpath = 1;
-		page_cache_get(page);
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
@@ -483,16 +487,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
 				       needs_clflush);
 
 		mutex_lock(&dev->struct_mutex);
-		page_cache_release(page);
+
 next_page:
 		mark_page_accessed(page);
-		if (release_page)
-			page_cache_release(page);
 
-		if (ret) {
-			ret = -EFAULT;
+		if (ret)
 			goto out;
-		}
 
 		remain -= page_length;
 		user_data += page_length;
@@ -500,6 +500,8 @@ next_page:
 	}
 
 out:
+	i915_gem_object_unpin_pages(obj);
+
 	if (hit_slowpath) {
 		/* Fixup: Kill any reinstated backing storage pages */
 		if (obj->madv == __I915_MADV_PURGED)
@@ -605,7 +607,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	char __user *user_data;
 	int page_offset, page_length, ret;
 
-	ret = i915_gem_object_pin(obj, 0, true);
+	ret = i915_gem_object_pin(obj, 0, true, true);
 	if (ret)
 		goto out;
 
@@ -685,7 +687,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
 				       page_length);
 	kunmap_atomic(vaddr);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 /* Only difference to the fast-path function is that this can handle bit17
@@ -719,7 +721,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
 					     page_do_bit17_swizzling);
 	kunmap(page);
 
-	return ret;
+	return ret ? -EFAULT : 0;
 }
 
 static int
@@ -728,7 +730,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		      struct drm_i915_gem_pwrite *args,
 		      struct drm_file *file)
 {
-	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	ssize_t remain;
 	loff_t offset;
 	char __user *user_data;
@@ -737,7 +738,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
-	int release_page;
+	int i;
+	struct scatterlist *sg;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -751,9 +753,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		 * right away and we therefore have to clflush anyway. */
 		if (obj->cache_level == I915_CACHE_NONE)
 			needs_clflush_after = 1;
-		ret = i915_gem_object_set_to_gtt_domain(obj, true);
-		if (ret)
-			return ret;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, true);
+			if (ret)
+				return ret;
+		}
 	}
 	/* Same trick applies for invalidate partially written cachelines before
 	 * writing.  */
@@ -761,13 +765,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	    && obj->cache_level == I915_CACHE_NONE)
 		needs_clflush_before = 1;
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	offset = args->offset;
 	obj->dirty = 1;
 
-	while (remain > 0) {
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 		int partial_cacheline_write;
 
+		if (i < offset >> PAGE_SHIFT)
+			continue;
+
+		if (remain <= 0)
+			break;
+
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
@@ -786,18 +802,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 			((shmem_page_offset | page_length)
 				& (boot_cpu_data.x86_clflush_size - 1));
 
-		if (obj->pages) {
-			page = obj->pages[offset >> PAGE_SHIFT];
-			release_page = 0;
-		} else {
-			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto out;
-			}
-			release_page = 1;
-		}
-
+		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -809,26 +814,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 			goto next_page;
 
 		hit_slowpath = 1;
-		page_cache_get(page);
 		mutex_unlock(&dev->struct_mutex);
-
 		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
 					user_data, page_do_bit17_swizzling,
 					partial_cacheline_write,
 					needs_clflush_after);
 
 		mutex_lock(&dev->struct_mutex);
-		page_cache_release(page);
+
 next_page:
 		set_page_dirty(page);
 		mark_page_accessed(page);
-		if (release_page)
-			page_cache_release(page);
 
-		if (ret) {
-			ret = -EFAULT;
+		if (ret)
 			goto out;
-		}
 
 		remain -= page_length;
 		user_data += page_length;
@@ -836,6 +835,8 @@ next_page:
 	}
 
 out:
+	i915_gem_object_unpin_pages(obj);
+
 	if (hit_slowpath) {
 		/* Fixup: Kill any reinstated backing storage pages */
 		if (obj->madv == __I915_MADV_PURGED)
@@ -919,10 +920,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	if (obj->gtt_space &&
-	    obj->cache_level == I915_CACHE_NONE &&
+	if (obj->cache_level == I915_CACHE_NONE &&
 	    obj->tiling_mode == I915_TILING_NONE &&
-	    obj->map_and_fenceable &&
 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
 		/* Note that the gtt paths might fail with non-page-backed user
@@ -930,7 +929,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT)
+	if (ret == -EFAULT || ret == -ENOSPC)
 		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 
 out:
@@ -940,6 +939,240 @@ unlock:
 	return ret;
 }
 
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+		     bool interruptible)
+{
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		struct completion *x = &dev_priv->error_completion;
+		bool recovery_complete;
+		unsigned long flags;
+
+		/* Give the error handler a chance to run. */
+		spin_lock_irqsave(&x->wait.lock, flags);
+		recovery_complete = x->done > 0;
+		spin_unlock_irqrestore(&x->wait.lock, flags);
+
+		/* Non-interruptible callers can't handle -EAGAIN, hence return
+		 * -EIO unconditionally for these. */
+		if (!interruptible)
+			return -EIO;
+
+		/* Recovery complete, but still wedged means reset failure. */
+		if (recovery_complete)
+			return -EIO;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	ret = 0;
+	if (seqno == ring->outstanding_lazy_request)
+		ret = i915_add_request(ring, NULL, NULL);
+
+	return ret;
+}
+
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number we are waiting for
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Else returns the
+ * errno with remaining time filled in timeout argument.
+ */
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+			bool interruptible, struct timespec *timeout)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct timespec before, now, wait_time={1,0};
+	unsigned long timeout_jiffies;
+	long end;
+	bool wait_forever = true;
+	int ret;
+
+	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+		return 0;
+
+	trace_i915_gem_request_wait_begin(ring, seqno);
+
+	if (timeout != NULL) {
+		wait_time = *timeout;
+		wait_forever = false;
+	}
+
+	timeout_jiffies = timespec_to_jiffies(&wait_time);
+
+	if (WARN_ON(!ring->irq_get(ring)))
+		return -ENODEV;
+
+	/* Record current time in case interrupted by signal, or wedged */
+	getrawmonotonic(&before);
+
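+/* Wake as soon as the seqno has passed or the GPU is declared wedged */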
+#define EXIT_COND \
+	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
+	atomic_read(&dev_priv->mm.wedged))
+	do {
+		if (interruptible)
+			end = wait_event_interruptible_timeout(ring->irq_queue,
+							       EXIT_COND,
+							       timeout_jiffies);
+		else
+			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+						 timeout_jiffies);
+
+		ret = i915_gem_check_wedge(dev_priv, interruptible);
+		if (ret)
+			end = ret;
+	} while (end == 0 && wait_forever);
+
+	getrawmonotonic(&now);
+
+	ring->irq_put(ring);
+	trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+	if (timeout) {
+		struct timespec sleep_time = timespec_sub(now, before);
+		*timeout = timespec_sub(*timeout, sleep_time);
+	}
+
+	switch (end) {
+	case -EIO:
+	case -EAGAIN: /* Wedged */
+	case -ERESTARTSYS: /* Signal */
+		return (int)end;
+	case 0: /* Timeout */
+		if (timeout)
+			set_normalized_timespec(timeout, 0, 0);
+		return -ETIME;
+	default: /* Completed */
+		WARN_ON(end < 0); /* We're not aware of other errors */
+		return 0;
+	}
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool interruptible = dev_priv->mm.interruptible;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(seqno == 0);
+
+	ret = i915_gem_check_wedge(dev_priv, interruptible);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	return __wait_seqno(ring, seqno, interruptible, NULL);
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
+{
+	struct intel_ring_buffer *ring = obj->ring;
+	u32 seqno;
+	int ret;
+
+	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_wait_seqno(ring, seqno);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may have not yet
+	 * retired the buffer.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	return 0;
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    bool readonly)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = obj->ring;
+	u32 seqno;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!dev_priv->mm.interruptible);
+
+	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_gem_check_wedge(dev_priv, true);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
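+	/* Drop struct_mutex around the wait; the object's state may change while we sleep */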
+	mutex_unlock(&dev->struct_mutex);
+	ret = __wait_seqno(ring, seqno, true, NULL);
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may have not yet
+	 * retired the buffer.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	return ret;
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -977,6 +1210,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		goto unlock;
 	}
 
+	/* Try to flush the object off the GPU without holding the lock.
+	 * We will repeat the flush holding the lock in the normal manner
+	 * to catch cases where we are gazumped.
+	 */
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	if (ret)
+		goto unref;
+
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
@@ -990,6 +1231,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
+unref:
 	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -1109,7 +1351,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			goto unlock;
 	}
 	if (!obj->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+		ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
 		if (ret)
 			goto unlock;
 
@@ -1270,6 +1512,42 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int ret;
+
+	if (obj->base.map_list.map)
+		return 0;
+
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		return ret;
+
+	/* Badly fragmented mmap space? The only way we can recover
+	 * space is by destroying unwanted objects. We can't randomly release
+	 * mmap_offsets as userspace expects them to be persistent for the
+	 * lifetime of the objects. The closest we can do is to release the
+	 * offsets on purgeable objects by truncating them and marking them purged,
+	 * which prevents userspace from ever using that object again.
+	 */
+	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		return ret;
+
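+	/* Last resort: evict everything and release all unpinned backing pages */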
+	i915_gem_shrink_all(dev_priv);
+	return drm_gem_create_mmap_offset(&obj->base);
+}
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	if (!obj->base.map_list.map)
+		return;
+
+	drm_gem_free_mmap_offset(&obj->base);
+}
+
 int
 i915_gem_mmap_gtt(struct drm_file *file,
 		  struct drm_device *dev,
@@ -1301,11 +1579,9 @@ i915_gem_mmap_gtt(struct drm_file *file,
 		goto out;
 	}
 
-	if (!obj->base.map_list.map) {
-		ret = drm_gem_create_mmap_offset(&obj->base);
-		if (ret)
-			goto out;
-	}
+	ret = i915_gem_object_create_mmap_offset(obj);
+	if (ret)
+		goto out;
 
 	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
 
@@ -1340,83 +1616,245 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-			      gfp_t gfpmask)
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-	int page_count, i;
-	struct address_space *mapping;
 	struct inode *inode;
-	struct page *page;
 
-	if (obj->pages || obj->sg_table)
-		return 0;
+	i915_gem_object_free_mmap_offset(obj);
 
-	/* Get the list of pages out of our struct file.  They'll be pinned
-	 * at this point until we release them.
-	 */
-	page_count = obj->base.size / PAGE_SIZE;
-	BUG_ON(obj->pages != NULL);
-	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
-	if (obj->pages == NULL)
-		return -ENOMEM;
+	if (obj->base.filp == NULL)
+		return;
 
+	/* Our goal here is to return as much of the memory as
+	 * possible back to the system as we are called from OOM.
+	 * To do this we must instruct the shmfs to drop all of its
+	 * backing pages, *now*.
+	 */
 	inode = obj->base.filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
-	gfpmask |= mapping_gfp_mask(mapping);
-
-	for (i = 0; i < page_count; i++) {
-		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-		if (IS_ERR(page))
-			goto err_pages;
-
-		obj->pages[i] = page;
-	}
-
-	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_do_bit_17_swizzle(obj);
-
-	return 0;
+	shmem_truncate_range(inode, 0, (loff_t)-1);
 
-err_pages:
-	while (i--)
-		page_cache_release(obj->pages[i]);
+	obj->madv = __I915_MADV_PURGED;
+}
 
-	drm_free_large(obj->pages);
-	obj->pages = NULL;
-	return PTR_ERR(page);
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+	return obj->madv == I915_MADV_DONTNEED;
 }
 
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	int page_count = obj->base.size / PAGE_SIZE;
-	int i;
-
-	if (!obj->pages)
-		return;
+	struct scatterlist *sg;
+	int ret, i;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		i915_gem_clflush_object(obj);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);
 
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for (i = 0; i < page_count; i++) {
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+
 		if (obj->dirty)
-			set_page_dirty(obj->pages[i]);
+			set_page_dirty(page);
 
 		if (obj->madv == I915_MADV_WILLNEED)
-			mark_page_accessed(obj->pages[i]);
+			mark_page_accessed(page);
 
-		page_cache_release(obj->pages[i]);
+		page_cache_release(page);
 	}
 	obj->dirty = 0;
 
-	drm_free_large(obj->pages);
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+	if (obj->pages == NULL)
+		return 0;
+
+	BUG_ON(obj->gtt_space);
+
+	if (obj->pages_pin_count)
+		return -EBUSY;
+
+	ops->put_pages(obj);
 	obj->pages = NULL;
+
+	list_del(&obj->gtt_list);
+	if (i915_gem_object_is_purgeable(obj))
+		i915_gem_object_truncate(obj);
+
+	return 0;
+}
+
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+	struct drm_i915_gem_object *obj, *next;
+	long count = 0;
+
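+	/* Reap purgeable objects: first those without a GTT binding, then
+	 * unbind and reap candidates from the inactive list.
+	 */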
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.unbound_list,
+				 gtt_list) {
+		if (i915_gem_object_is_purgeable(obj) &&
+		    i915_gem_object_put_pages(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if (i915_gem_object_is_purgeable(obj) &&
+		    i915_gem_object_unbind(obj) == 0 &&
+		    i915_gem_object_put_pages(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	return count;
+}
+
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj, *next;
+
+	i915_gem_evict_everything(dev_priv->dev);
+
+	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+		i915_gem_object_put_pages(obj);
+}
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int page_count, i;
+	struct address_space *mapping;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	struct page *page;
+	gfp_t gfp;
+
+	/* Assert that the object is not currently in any GPU domain. As it
+	 * wasn't in the GTT, there shouldn't be any way it could have been in
+	 * a GPU cache
+	 */
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
+	page_count = obj->base.size / PAGE_SIZE;
+	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+		sg_free_table(st);
+		kfree(st);
+		return -ENOMEM;
+	}
+
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 *
+	 * Fail silently without starting the shrinker
+	 */
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	gfp = mapping_gfp_mask(mapping);
+	gfp |= __GFP_NORETRY | __GFP_NOWARN;
+	gfp &= ~(__GFP_IO | __GFP_WAIT);
+	for_each_sg(st->sgl, sg, page_count, i) {
+		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		if (IS_ERR(page)) {
+			i915_gem_purge(dev_priv, page_count);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		}
+		if (IS_ERR(page)) {
+			/* We've tried hard to allocate the memory by reaping
+			 * our own buffers; now let the real VM do its job and
+			 * go down in flames if truly OOM.
+			 */
+			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+			gfp |= __GFP_IO | __GFP_WAIT;
+
+			i915_gem_shrink_all(dev_priv);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (IS_ERR(page))
+				goto err_pages;
+
+			gfp |= __GFP_NORETRY | __GFP_NOWARN;
+			gfp &= ~(__GFP_IO | __GFP_WAIT);
+		}
+
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+	}
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_do_bit_17_swizzle(obj);
+
+	obj->pages = st;
+	return 0;
+
+err_pages:
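+	/* Only the first i sg entries were populated with pages */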
+	for_each_sg(st->sgl, sg, i, page_count)
+		page_cache_release(sg_page(sg));
+	sg_free_table(st);
+	kfree(st);
+	return PTR_ERR(page);
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
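+ *
+ * A typical caller, mirroring the pread/pwrite paths above:
+ *
+ *	ret = i915_gem_object_get_pages(obj);
+ *	if (ret)
+ *		return ret;
+ *	i915_gem_object_pin_pages(obj);
+ *	... operate on obj->pages ...
+ *	i915_gem_object_unpin_pages(obj);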
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+	int ret;
+
+	if (obj->pages)
+		return 0;
+
+	BUG_ON(obj->pages_pin_count);
+
+	ret = ops->get_pages(obj);
+	if (ret)
+		return ret;
+
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	return 0;
 }
 
 void
@@ -1440,7 +1878,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
-	obj->last_rendering_seqno = seqno;
+	obj->last_read_seqno = seqno;
 
 	if (obj->fenced_gpu_access) {
 		obj->last_fenced_seqno = seqno;
@@ -1457,97 +1895,35 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
-	list_del_init(&obj->ring_list);
-	obj->last_rendering_seqno = 0;
-	obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
 	BUG_ON(!obj->active);
-	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
-	i915_gem_object_move_off_active(obj);
-}
 
-static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (obj->pin_count) /* are we a framebuffer? */
+		intel_mark_fb_idle(obj);
 
 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	BUG_ON(!list_empty(&obj->gpu_write_list));
-	BUG_ON(!obj->active);
+	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
-	i915_gem_object_move_off_active(obj);
+	obj->last_read_seqno = 0;
+	obj->last_write_seqno = 0;
+	obj->base.write_domain = 0;
+
+	obj->last_fenced_seqno = 0;
 	obj->fenced_gpu_access = false;
 
 	obj->active = 0;
-	obj->pending_gpu_write = false;
 	drm_gem_object_unreference(&obj->base);
 
 	WARN_ON(i915_verify_lists(dev));
 }
 
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
-	struct inode *inode;
-
-	/* Our goal here is to return as much of the memory as
-	 * is possible back to the system as we are called from OOM.
-	 * To do this we must instruct the shmfs to drop all of its
-	 * backing pages, *now*.
-	 */
-	inode = obj->base.filp->f_path.dentry->d_inode;
-	shmem_truncate_range(inode, 0, (loff_t)-1);
-
-	if (obj->base.map_list.map)
-		drm_gem_free_mmap_offset(&obj->base);
-
-	obj->madv = __I915_MADV_PURGED;
-}
-
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
-	return obj->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
-			       uint32_t flush_domains)
-{
-	struct drm_i915_gem_object *obj, *next;
-
-	list_for_each_entry_safe(obj, next,
-				 &ring->gpu_write_list,
-				 gpu_write_list) {
-		if (obj->base.write_domain & flush_domains) {
-			uint32_t old_write_domain = obj->base.write_domain;
-
-			obj->base.write_domain = 0;
-			list_del_init(&obj->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring,
-						       i915_gem_next_request_seqno(ring));
-
-			trace_i915_gem_object_change_domain(obj,
-							    obj->base.read_domains,
-							    old_write_domain);
-		}
-	}
-}
-
 static u32
 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -1588,15 +1964,16 @@ i915_add_request(struct intel_ring_buffer *ring,
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (ring->gpu_caches_dirty) {
-		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
 
-		ring->gpu_caches_dirty = false;
+	if (request == NULL) {
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
 	}
 
-	BUG_ON(request == NULL);
 	seqno = i915_gem_next_request_seqno(ring);
 
 	/* Record the position of the start of the request so that
@@ -1607,8 +1984,10 @@ i915_add_request(struct intel_ring_buffer *ring,
 	request_ring_position = intel_ring_get_tail(ring);
 
 	ret = ring->add_request(ring, &seqno);
-	if (ret)
-	    return ret;
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 
 	trace_i915_gem_request_add(ring, seqno);
 
@@ -1618,6 +1997,7 @@ i915_add_request(struct intel_ring_buffer *ring,
 	request->emitted_jiffies = jiffies;
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
+	request->file_priv = NULL;
 
 	if (file) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1637,13 +2017,13 @@ i915_add_request(struct intel_ring_buffer *ring,
 				  jiffies +
 				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 		}
-		if (was_empty)
+		if (was_empty) {
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
+			intel_mark_busy(dev_priv->dev);
+		}
 	}
 
-	WARN_ON(!list_empty(&ring->gpu_write_list));
-
 	return 0;
 }
 
@@ -1685,8 +2065,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		obj->base.write_domain = 0;
-		list_del_init(&obj->gpu_write_list);
 		i915_gem_object_move_to_inactive(obj);
 	}
 }
@@ -1722,20 +2100,6 @@ void i915_gem_reset(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_lists(dev_priv, ring);
 
-	/* Remove anything from the flushing lists. The GPU cache is likely
-	 * to be lost on reset along with the data, so simply move the
-	 * lost bo to the inactive list.
-	 */
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		obj = list_first_entry(&dev_priv->mm.flushing_list,
-				      struct drm_i915_gem_object,
-				      mm_list);
-
-		obj->base.write_domain = 0;
-		list_del_init(&obj->gpu_write_list);
-		i915_gem_object_move_to_inactive(obj);
-	}
-
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
@@ -1764,7 +2128,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	seqno = ring->get_seqno(ring);
+	seqno = ring->get_seqno(ring, true);
 
 	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 		if (seqno >= ring->sync_seqno[i])
@@ -1803,13 +2167,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 				      struct drm_i915_gem_object,
 				      ring_list);
 
-		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
 			break;
 
-		if (obj->base.write_domain != 0)
-			i915_gem_object_move_to_flushing(obj);
-		else
-			i915_gem_object_move_to_inactive(obj);
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely(ring->trace_irq_seqno &&
@@ -1858,216 +2219,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	 */
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty) {
-			struct drm_i915_gem_request *request;
-
-			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (request == NULL ||
-			    i915_add_request(ring, NULL, request))
-			    kfree(request);
-		}
+		if (ring->gpu_caches_dirty)
+			i915_add_request(ring, NULL, NULL);
 
 		idle &= list_empty(&ring->request_list);
 	}
 
 	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+	if (idle)
+		intel_mark_idle(dev);
 
 	mutex_unlock(&dev->struct_mutex);
 }
 
-int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
-		     bool interruptible)
-{
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		struct completion *x = &dev_priv->error_completion;
-		bool recovery_complete;
-		unsigned long flags;
-
-		/* Give the error handler a chance to run. */
-		spin_lock_irqsave(&x->wait.lock, flags);
-		recovery_complete = x->done > 0;
-		spin_unlock_irqrestore(&x->wait.lock, flags);
-
-		/* Non-interruptible callers can't handle -EAGAIN, hence return
-		 * -EIO unconditionally for these. */
-		if (!interruptible)
-			return -EIO;
-
-		/* Recovery complete, but still wedged means reset failure. */
-		if (recovery_complete)
-			return -EIO;
-
-		return -EAGAIN;
-	}
-
-	return 0;
-}
-
-/*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
- */
-static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
-{
-	int ret = 0;
-
-	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-
-	if (seqno == ring->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(ring, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
-
-		BUG_ON(seqno != request->seqno);
-	}
-
-	return ret;
-}
-
-/**
- * __wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- *
- * Returns 0 if the seqno was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
-			bool interruptible, struct timespec *timeout)
-{
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	struct timespec before, now, wait_time={1,0};
-	unsigned long timeout_jiffies;
-	long end;
-	bool wait_forever = true;
-	int ret;
-
-	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
-		return 0;
-
-	trace_i915_gem_request_wait_begin(ring, seqno);
-
-	if (timeout != NULL) {
-		wait_time = *timeout;
-		wait_forever = false;
-	}
-
-	timeout_jiffies = timespec_to_jiffies(&wait_time);
-
-	if (WARN_ON(!ring->irq_get(ring)))
-		return -ENODEV;
-
-	/* Record current time in case interrupted by signal, or wedged * */
-	getrawmonotonic(&before);
-
-#define EXIT_COND \
-	(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
-	atomic_read(&dev_priv->mm.wedged))
-	do {
-		if (interruptible)
-			end = wait_event_interruptible_timeout(ring->irq_queue,
-							       EXIT_COND,
-							       timeout_jiffies);
-		else
-			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
-						 timeout_jiffies);
-
-		ret = i915_gem_check_wedge(dev_priv, interruptible);
-		if (ret)
-			end = ret;
-	} while (end == 0 && wait_forever);
-
-	getrawmonotonic(&now);
-
-	ring->irq_put(ring);
-	trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
-
-	if (timeout) {
-		struct timespec sleep_time = timespec_sub(now, before);
-		*timeout = timespec_sub(*timeout, sleep_time);
-	}
-
-	switch (end) {
-	case -EIO:
-	case -EAGAIN: /* Wedged */
-	case -ERESTARTSYS: /* Signal */
-		return (int)end;
-	case 0: /* Timeout */
-		if (timeout)
-			set_normalized_timespec(timeout, 0, 0);
-		return -ETIME;
-	default: /* Completed */
-		WARN_ON(end < 0); /* We're not aware of other errors */
-		return 0;
-	}
-}
-
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
-{
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	int ret = 0;
-
-	BUG_ON(seqno == 0);
-
-	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_check_olr(ring, seqno);
-	if (ret)
-		return ret;
-
-	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
-
-	return ret;
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
-{
-	int ret;
-
-	/* This function only exists to support waiting for existing rendering,
-	 * not for emitting required flushes.
-	 */
-	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
-	/* If there is rendering queued on the buffer being evicted, wait for
-	 * it.
-	 */
-	if (obj->active) {
-		ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
-		if (ret)
-			return ret;
-		i915_gem_retire_requests_ring(obj->ring);
-	}
-
-	return 0;
-}
-
 /**
  * Ensures that an object will eventually get non-busy by flushing any required
  * write domains, emitting any outstanding lazy request and retiring and
@@ -2079,14 +2244,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	int ret;
 
 	if (obj->active) {
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
+		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
 		if (ret)
 			return ret;
 
-		ret = i915_gem_check_olr(obj->ring,
-					 obj->last_rendering_seqno);
-		if (ret)
-			return ret;
 		i915_gem_retire_requests_ring(obj->ring);
 	}
 
@@ -2146,7 +2307,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		goto out;
 
 	if (obj->active) {
-		seqno = obj->last_rendering_seqno;
+		seqno = obj->last_read_seqno;
 		ring = obj->ring;
 	}
 
@@ -2201,11 +2362,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj);
+		return i915_gem_object_wait_rendering(obj, false);
 
 	idx = intel_ring_sync_index(from, to);
 
-	seqno = obj->last_rendering_seqno;
+	seqno = obj->last_read_seqno;
 	if (seqno <= from->sync_seqno[idx])
 		return 0;
 
@@ -2259,6 +2420,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	if (obj->pin_count)
 		return -EBUSY;
 
+	BUG_ON(obj->pages == NULL);
+
 	ret = i915_gem_object_finish_gpu(obj);
 	if (ret)
 		return ret;
@@ -2269,22 +2432,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 
 	i915_gem_object_finish_gtt(obj);
 
-	/* Move the object to the CPU domain to ensure that
-	 * any possible CPU writes while it's not in the GTT
-	 * are flushed when we go to remap it.
-	 */
-	if (ret == 0)
-		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret == -ERESTARTSYS)
-		return ret;
-	if (ret) {
-		/* In the event of a disaster, abandon all caches and
-		 * hope for the best.
-		 */
-		i915_gem_clflush_object(obj);
-		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
-
 	/* release the fence reg _after_ flushing */
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
@@ -2300,10 +2447,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	}
 	i915_gem_gtt_finish_object(obj);
 
-	i915_gem_object_put_pages_gtt(obj);
-
-	list_del_init(&obj->gtt_list);
-	list_del_init(&obj->mm_list);
+	list_del(&obj->mm_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;
 
@@ -2311,48 +2456,14 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	obj->gtt_space = NULL;
 	obj->gtt_offset = 0;
 
-	if (i915_gem_object_is_purgeable(obj))
-		i915_gem_object_truncate(obj);
-
-	return ret;
-}
-
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	int ret;
-
-	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
-	ret = ring->flush(ring, invalidate_domains, flush_domains);
-	if (ret)
-		return ret;
-
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		i915_gem_process_flushing_list(ring, flush_domains);
-
 	return 0;
 }
 
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
-	int ret;
-
-	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+	if (list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list)) {
-		ret = i915_gem_flush_ring(ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
-	}
-
 	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
@@ -2371,10 +2482,6 @@ int i915_gpu_idle(struct drm_device *dev)
 		ret = i915_ring_idle(ring);
 		if (ret)
 			return ret;
-
-		/* Is the device fubar? */
-		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
-			return -EBUSY;
 	}
 
 	return 0;
@@ -2547,21 +2654,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 {
-	int ret;
-
-	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(obj->ring,
-						  0, obj->base.write_domain);
-			if (ret)
-				return ret;
-		}
-
-		obj->fenced_gpu_access = false;
-	}
-
 	if (obj->last_fenced_seqno) {
-		ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
 		if (ret)
 			return ret;
 
@@ -2574,6 +2668,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
 		mb();
 
+	obj->fenced_gpu_access = false;
 	return 0;
 }
 
@@ -2693,18 +2788,88 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+				     struct drm_mm_node *gtt_space,
+				     unsigned long cache_level)
+{
+	struct drm_mm_node *other;
+
+	/* On non-LLC machines we have to be careful when putting differing
+	 * types of snoopable memory together to avoid the prefetcher
+	 * crossing memory domains and dying.
+	 */
+	if (HAS_LLC(dev))
+		return true;
+
+	if (gtt_space == NULL)
+		return true;
+
+	if (list_empty(&gtt_space->node_list))
+		return true;
+
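+	/* Neighbours of a different cache level must be separated by a hole */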
+	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+	if (other->allocated && !other->hole_follows && other->color != cache_level)
+		return false;
+
+	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+		return false;
+
+	return true;
+}
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
+
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (obj->gtt_space == NULL) {
+			printk(KERN_ERR "object found on GTT list with no space reserved\n");
+			err++;
+			continue;
+		}
+
+		if (obj->cache_level != obj->gtt_space->color) {
+			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+			       obj->gtt_space->start,
+			       obj->gtt_space->start + obj->gtt_space->size,
+			       obj->cache_level,
+			       obj->gtt_space->color);
+			err++;
+			continue;
+		}
+
+		if (!i915_gem_valid_gtt_space(dev,
+					      obj->gtt_space,
+					      obj->cache_level)) {
+			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+			       obj->gtt_space->start,
+			       obj->gtt_space->start + obj->gtt_space->size,
+			       obj->cache_level);
+			err++;
+			continue;
+		}
+	}
+
+	WARN_ON(err);
+#endif
+}
+
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 			    unsigned alignment,
-			    bool map_and_fenceable)
+			    bool map_and_fenceable,
+			    bool nonblocking)
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_mm_node *free_space;
-	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	int ret;
@@ -2744,89 +2909,67 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return -E2BIG;
 	}
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
  search_free:
 	if (map_and_fenceable)
 		free_space =
-			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
-						    size, alignment,
-						    0, dev_priv->mm.gtt_mappable_end,
-						    0);
+			drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+							  size, alignment, obj->cache_level,
+							  0, dev_priv->mm.gtt_mappable_end,
+							  false);
 	else
-		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
-						size, alignment, 0);
+		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
+						      size, alignment, obj->cache_level,
+						      false);
 
 	if (free_space != NULL) {
 		if (map_and_fenceable)
 			obj->gtt_space =
 				drm_mm_get_block_range_generic(free_space,
-							       size, alignment, 0,
+							       size, alignment, obj->cache_level,
 							       0, dev_priv->mm.gtt_mappable_end,
-							       0);
+							       false);
 		else
 			obj->gtt_space =
-				drm_mm_get_block(free_space, size, alignment);
+				drm_mm_get_block_generic(free_space,
+							 size, alignment, obj->cache_level,
+							 false);
 	}
 	if (obj->gtt_space == NULL) {
-		/* If the gtt is empty and we're still having trouble
-		 * fitting our object in, we're out of memory.
-		 */
 		ret = i915_gem_evict_something(dev, size, alignment,
-					       map_and_fenceable);
+					       obj->cache_level,
+					       map_and_fenceable,
+					       nonblocking);
 		if (ret)
 			return ret;
 
 		goto search_free;
 	}
-
-	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
-	if (ret) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
+					      obj->gtt_space,
+					      obj->cache_level))) {
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
-
-		if (ret == -ENOMEM) {
-			/* first try to reclaim some memory by clearing the GTT */
-			ret = i915_gem_evict_everything(dev, false);
-			if (ret) {
-				/* now try to shrink everyone else */
-				if (gfpmask) {
-					gfpmask = 0;
-					goto search_free;
-				}
-
-				return -ENOMEM;
-			}
-
-			goto search_free;
-		}
-
-		return ret;
+		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
-		i915_gem_object_put_pages_gtt(obj);
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
-
-		if (i915_gem_evict_everything(dev, false))
-			return ret;
-
-		goto search_free;
+		return ret;
 	}
 
 	if (!dev_priv->mm.aliasing_ppgtt)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
-	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	/* Assert that the object is not currently in any GPU domain. As it
-	 * wasn't in the GTT, there shouldn't be any way it could have been in
-	 * a GPU cache
-	 */
-	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
-
 	obj->gtt_offset = obj->gtt_space->start;
 
 	fenceable =
@@ -2839,6 +2982,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	obj->map_and_fenceable = mappable && fenceable;
 
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
+	i915_gem_verify_gtt(dev);
 	return 0;
 }
 
@@ -2865,18 +3009,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 
 	trace_i915_gem_object_clflush(obj);
 
-	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
-}
-
-/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
-	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	/* Queue the GPU write cache flushing we need. */
-	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
+	drm_clflush_sg(obj->pages);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2945,16 +3078,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
-	if (obj->pending_gpu_write || write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
-
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -2997,6 +3124,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		return -EBUSY;
 	}
 
+	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+		ret = i915_gem_object_unbind(obj);
+		if (ret)
+			return ret;
+	}
+
 	if (obj->gtt_space) {
 		ret = i915_gem_object_finish_gpu(obj);
 		if (ret)
@@ -3008,7 +3141,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		 * registers with snooped memory, so relinquish any fences
 		 * currently pointing to our region in the aperture.
 		 */
-		if (INTEL_INFO(obj->base.dev)->gen < 6) {
+		if (INTEL_INFO(dev)->gen < 6) {
 			ret = i915_gem_object_put_fence(obj);
 			if (ret)
 				return ret;
@@ -3019,6 +3152,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		if (obj->has_aliasing_ppgtt_mapping)
 			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
 					       obj, cache_level);
+
+		obj->gtt_space->color = cache_level;
 	}
 
 	if (cache_level == I915_CACHE_NONE) {
@@ -3045,9 +3180,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	}
 
 	obj->cache_level = cache_level;
+	i915_gem_verify_gtt(dev);
 	return 0;
 }
 
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file)
+{
+	struct drm_i915_gem_caching *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	args->caching = obj->cache_level != I915_CACHE_NONE;
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file)
+{
+	struct drm_i915_gem_caching *args = data;
+	struct drm_i915_gem_object *obj;
+	enum i915_cache_level level;
+	int ret;
+
+	switch (args->caching) {
+	case I915_CACHING_NONE:
+		level = I915_CACHE_NONE;
+		break;
+	case I915_CACHING_CACHED:
+		level = I915_CACHE_LLC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, level);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3061,10 +3259,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_sync(obj, pipelined);
 		if (ret)
@@ -3088,7 +3282,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_object_pin(obj, alignment, true);
+	ret = i915_gem_object_pin(obj, alignment, true, false);
 	if (ret)
 		return ret;
 
@@ -3100,7 +3294,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.write_domain = 0;
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3118,13 +3312,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_wait_rendering(obj, false);
 	if (ret)
 		return ret;
 
@@ -3148,16 +3336,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
-	if (write || obj->pending_gpu_write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
-
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -3237,7 +3419,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    uint32_t alignment,
-		    bool map_and_fenceable)
+		    bool map_and_fenceable,
+		    bool nonblocking)
 {
 	int ret;
 
@@ -3262,7 +3445,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
 	if (obj->gtt_space == NULL) {
 		ret = i915_gem_object_bind_to_gtt(obj, alignment,
-						  map_and_fenceable);
+						  map_and_fenceable,
+						  nonblocking);
 		if (ret)
 			return ret;
 	}
@@ -3320,7 +3504,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	obj->user_pin_count++;
 	obj->pin_filp = file;
 	if (obj->user_pin_count == 1) {
-		ret = i915_gem_object_pin(obj, args->alignment, true);
+		ret = i915_gem_object_pin(obj, args->alignment, true, false);
 		if (ret)
 			goto out;
 	}
@@ -3400,6 +3584,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	ret = i915_gem_object_flush_active(obj);
 
 	args->busy = obj->active;
+	if (obj->ring) {
+		BUILD_BUG_ON(I915_NUM_RINGS > 16);
+		args->busy |= intel_ring_flag(obj->ring) << 16;
+	}
 
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -3448,9 +3636,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (obj->madv != __I915_MADV_PURGED)
 		obj->madv = args->madv;
 
-	/* if the object is no longer bound, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj) &&
-	    obj->gtt_space == NULL)
+	/* if the object is no longer attached, discard its backing storage */
+	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3462,10 +3649,32 @@ unlock:
 	return ret;
 }
 
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			  const struct drm_i915_gem_object_ops *ops)
+{
+	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->gtt_list);
+	INIT_LIST_HEAD(&obj->ring_list);
+	INIT_LIST_HEAD(&obj->exec_list);
+
+	obj->ops = ops;
+
+	obj->fence_reg = I915_FENCE_REG_NONE;
+	obj->madv = I915_MADV_WILLNEED;
+	/* Avoid an unnecessary call to unbind on the first bind. */
+	obj->map_and_fenceable = true;
+
+	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.get_pages = i915_gem_object_get_pages_gtt,
+	.put_pages = i915_gem_object_put_pages_gtt,
+};
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	u32 mask;
@@ -3489,7 +3698,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	mapping_set_gfp_mask(mapping, mask);
 
-	i915_gem_info_add_obj(dev_priv, size);
+	i915_gem_object_init(obj, &i915_gem_object_ops);
 
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3511,17 +3720,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	} else
 		obj->cache_level = I915_CACHE_NONE;
 
-	obj->base.driver_private = NULL;
-	obj->fence_reg = I915_FENCE_REG_NONE;
-	INIT_LIST_HEAD(&obj->mm_list);
-	INIT_LIST_HEAD(&obj->gtt_list);
-	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
-	INIT_LIST_HEAD(&obj->gpu_write_list);
-	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;
-
 	return obj;
 }
 
@@ -3540,9 +3738,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (gem_obj->import_attach)
-		drm_prime_gem_destroy(gem_obj, obj->sg_table);
-
 	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
 
@@ -3558,8 +3753,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		dev_priv->mm.interruptible = was_interruptible;
 	}
 
-	if (obj->base.map_list.map)
-		drm_gem_free_mmap_offset(&obj->base);
+	obj->pages_pin_count = 0;
+	i915_gem_object_put_pages(obj);
+	i915_gem_object_free_mmap_offset(obj);
+
+	BUG_ON(obj->pages);
+
+	if (obj->base.import_attach)
+		drm_prime_gem_destroy(&obj->base, NULL);
 
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3590,7 +3791,7 @@ i915_gem_idle(struct drm_device *dev)
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_gem_evict_everything(dev, false);
+		i915_gem_evict_everything(dev);
 
 	i915_gem_reset_fences(dev);
 
@@ -3891,7 +4092,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	mutex_unlock(&dev->struct_mutex);
 
@@ -3939,7 +4139,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
 {
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 }
 
 void
@@ -3949,10 +4148,10 @@ i915_gem_load(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4197,18 +4396,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 }
 
 static int
-i915_gpu_is_active(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int lists_empty;
-
-	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
-
-	return !lists_empty;
-}
-
-static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
@@ -4216,60 +4403,27 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
-	struct drm_i915_gem_object *obj, *next;
+	struct drm_i915_gem_object *obj;
 	int nr_to_scan = sc->nr_to_scan;
 	int cnt;
 
 	if (!mutex_trylock(&dev->struct_mutex))
 		return 0;
 
-	/* "fast-path" to count number of available objects */
-	if (nr_to_scan == 0) {
-		cnt = 0;
-		list_for_each_entry(obj,
-				    &dev_priv->mm.inactive_list,
-				    mm_list)
-			cnt++;
-		mutex_unlock(&dev->struct_mutex);
-		return cnt / 100 * sysctl_vfs_cache_pressure;
-	}
-
-rescan:
-	/* first scan for clean buffers */
-	i915_gem_retire_requests(dev);
-
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (i915_gem_object_is_purgeable(obj)) {
-			if (i915_gem_object_unbind(obj) == 0 &&
-			    --nr_to_scan == 0)
-				break;
-		}
+	if (nr_to_scan) {
+		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+		if (nr_to_scan > 0)
+			i915_gem_shrink_all(dev_priv);
 	}
 
-	/* second pass, evict/count anything still on the inactive list */
 	cnt = 0;
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (nr_to_scan &&
-		    i915_gem_object_unbind(obj) == 0)
-			nr_to_scan--;
-		else
-			cnt++;
-	}
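+	/* Report the number of pages that could be released under pressure */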
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+		if (obj->pages_pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
 
-	if (nr_to_scan && i915_gpu_is_active(dev)) {
-		/*
-		 * We are desperate for pages, so as a last resort, wait
-		 * for the GPU to finish and discard whatever we can.
-		 * This has a dramatic impact to reduce the number of
-		 * OOM-killer events whilst running the GPU aggressively.
-		 */
-		if (i915_gpu_idle(dev) == 0)
-			goto rescan;
-	}
 	mutex_unlock(&dev->struct_mutex);
-	return cnt / 100 * sysctl_vfs_cache_pressure;
+	return cnt;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a21c3dccf436..1eb48faf741b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,8 +97,7 @@
 
 static struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to, u32 seqno);
+static int do_switch(struct i915_hw_context *to);
 
 static int get_context_size(struct drm_device *dev)
 {
@@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev)
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
-		ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+		if (IS_HASWELL(dev))
+			ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+		else
+			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	default:
 		BUG();
@@ -219,20 +221,21 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 	 * default context.
 	 */
 	dev_priv->ring[RCS].default_context = ctx;
-	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
-	if (ret) {
-		do_destroy(ctx);
-		return ret;
-	}
+	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+	if (ret)
+		goto err_destroy;
 
-	ret = do_switch(NULL, ctx, 0);
-	if (ret) {
-		i915_gem_object_unpin(ctx->obj);
-		do_destroy(ctx);
-	} else {
-		DRM_DEBUG_DRIVER("Default HW context loaded\n");
-	}
+	ret = do_switch(ctx);
+	if (ret)
+		goto err_unpin;
 
+	DRM_DEBUG_DRIVER("Default HW context loaded\n");
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(ctx->obj);
+err_destroy:
+	do_destroy(ctx);
 	return ret;
 }
 
@@ -359,18 +362,19 @@ mi_set_context(struct intel_ring_buffer *ring,
 	return ret;
 }
 
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to,
-		     u32 seqno)
+static int do_switch(struct i915_hw_context *to)
 {
-	struct intel_ring_buffer *ring = NULL;
+	struct intel_ring_buffer *ring = to->ring;
+	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 	u32 hw_flags = 0;
 	int ret;
 
-	BUG_ON(to == NULL);
 	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
 
-	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+	if (from_obj == to->obj)
+		return 0;
+
+	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
 	if (ret)
 		return ret;
 
@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
 	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
 		hw_flags |= MI_FORCE_RESTORE;
 
-	ring = to->ring;
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
@@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from_obj != NULL) {
+		u32 seqno = i915_gem_next_request_seqno(ring);
 		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
 		i915_gem_object_move_to_active(from_obj, ring, seqno);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
 		 * swapped, but there is no way to do that yet.
 		 */
 		from_obj->dirty = 1;
-		BUG_ON(from_obj->ring != to->ring);
+		BUG_ON(from_obj->ring != ring);
 		i915_gem_object_unpin(from_obj);
 
 		drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 			int to_id)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_file_private *file_priv = NULL;
 	struct i915_hw_context *to;
-	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 
 	if (dev_priv->hw_contexts_disabled)
 		return 0;
@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 	if (ring != &dev_priv->ring[RCS])
 		return 0;
 
-	if (file)
-		file_priv = file->driver_priv;
-
 	if (to_id == DEFAULT_CONTEXT_ID) {
 		to = ring->default_context;
 	} else {
-		to = i915_gem_context_get(file_priv, to_id);
+		if (file == NULL)
+			return -EINVAL;
+
+		to = i915_gem_context_get(file->driver_priv, to_id);
 		if (to == NULL)
 			return -ENOENT;
 	}
 
-	if (from_obj == to->obj)
-		return 0;
-
-	return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+	return do_switch(to);
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index af199596e792..773ef77b6c22 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
 #include <linux/dma-buf.h>
 
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
-				      enum dma_data_direction dir)
+					     enum dma_data_direction dir)
 {
 	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
-	struct drm_device *dev = obj->base.dev;
-	int npages = obj->base.size / PAGE_SIZE;
-	struct sg_table *sg = NULL;
-	int ret;
-	int nents;
+	struct sg_table *st;
+	struct scatterlist *src, *dst;
+	int ret, i;
 
-	ret = i915_mutex_lock_interruptible(dev);
+	ret = i915_mutex_lock_interruptible(obj->base.dev);
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (!obj->pages) {
-		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
-		if (ret)
-			goto out;
+	ret = i915_gem_object_get_pages(obj);
+	if (ret) {
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	/* Copy sg so that we make an independent mapping */
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL) {
+		st = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
-	/* link the pages into an SG then map the sg */
-	sg = drm_prime_pages_to_sg(obj->pages, npages);
-	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+	if (ret) {
+		kfree(st);
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	src = obj->pages->sgl;
+	dst = st->sgl;
+	for (i = 0; i < obj->pages->nents; i++) {
+		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+		dst = sg_next(dst);
+		src = sg_next(src);
+	}
+
+	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+		sg_free_table(st);
+		kfree(st);
+		st = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	i915_gem_object_pin_pages(obj);
+
 out:
-	mutex_unlock(&dev->struct_mutex);
-	return sg;
+	mutex_unlock(&obj->base.dev->struct_mutex);
+	return st;
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
-			    struct sg_table *sg, enum dma_data_direction dir)
+				   struct sg_table *sg,
+				   enum dma_data_direction dir)
 {
 	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
 	sg_free_table(sg);
@@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->base.dev;
-	int ret;
+	struct scatterlist *sg;
+	struct page **pages;
+	int ret, i;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -89,24 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 		goto out_unlock;
 	}
 
-	if (!obj->pages) {
-		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ERR_PTR(ret);
-		}
-	}
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto error;
 
-	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
-	if (!obj->dma_buf_vmapping) {
-		DRM_ERROR("failed to vmap object\n");
-		goto out_unlock;
-	}
+	ret = -ENOMEM;
+
+	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+	if (pages == NULL)
+		goto error;
+
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+		pages[i] = sg_page(sg);
+
+	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	if (!obj->dma_buf_vmapping)
+		goto error;
 
 	obj->vmapping_count = 1;
+	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
+
+error:
+	mutex_unlock(&dev->struct_mutex);
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -119,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 	if (ret)
 		return;
 
-	--obj->vmapping_count;
-	if (obj->vmapping_count == 0) {
+	if (--obj->vmapping_count == 0) {
 		vunmap(obj->dma_buf_vmapping);
 		obj->dma_buf_vmapping = NULL;
+
+		i915_gem_object_unpin_pages(obj);
 	}
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -151,6 +191,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 	return -EINVAL;
 }
 
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, write);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -162,25 +218,47 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.mmap = i915_gem_dmabuf_mmap,
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,
+	.begin_cpu_access = i915_gem_begin_cpu_access,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
-				struct drm_gem_object *gem_obj, int flags)
+				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-	return dma_buf_export(obj, &i915_dmabuf_ops,
-						  obj->base.size, 0600);
+	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+}
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *sg;
+
+	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg))
+		return PTR_ERR(sg);
+
+	obj->pages = sg;
+	obj->has_dma_mapping = true;
+	return 0;
 }
 
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	dma_buf_unmap_attachment(obj->base.import_attach,
+				 obj->pages, DMA_BIDIRECTIONAL);
+	obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+	.get_pages = i915_gem_object_get_pages_dmabuf,
+	.put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf)
+					     struct dma_buf *dma_buf)
 {
 	struct dma_buf_attachment *attach;
-	struct sg_table *sg;
 	struct drm_i915_gem_object *obj;
-	int npages;
-	int size;
 	int ret;
 
 	/* is this one of own objects? */
@@ -198,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	if (IS_ERR(attach))
 		return ERR_CAST(attach);
 
-	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto fail_detach;
-	}
-
-	size = dma_buf->size;
-	npages = size / PAGE_SIZE;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL) {
 		ret = -ENOMEM;
-		goto fail_unmap;
+		goto fail_detach;
 	}
 
-	ret = drm_gem_private_object_init(dev, &obj->base, size);
+	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	if (ret) {
 		kfree(obj);
-		goto fail_unmap;
+		goto fail_detach;
 	}
 
-	obj->sg_table = sg;
+	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
 	obj->base.import_attach = attach;
 
 	return &obj->base;
 
-fail_unmap:
-	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
 fail_detach:
 	dma_buf_detach(dma_buf, attach);
 	return ERR_PTR(ret);
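
The map_dma_buf hunk above clones the object's sg_table entry by entry so every importer gets an independent DMA mapping. A condensed sketch of that clone-then-map step, assuming each source entry describes whole pages; clone_and_map() is a made-up name used only for illustration:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *clone_and_map(struct device *dev,
				      struct sg_table *src,
				      enum dma_data_direction dir)
{
	struct sg_table *dst;
	struct scatterlist *s, *d;
	int i;

	dst = kmalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(dst, src->nents, GFP_KERNEL)) {
		kfree(dst);
		return ERR_PTR(-ENOMEM);
	}

	/* Copy the page pointers; the new table gets its own DMA mapping. */
	d = dst->sgl;
	for_each_sg(src->sgl, s, src->nents, i) {
		sg_set_page(d, sg_page(s), s->length, s->offset);
		d = sg_next(d);
	}

	if (!dma_map_sg(dev, dst->sgl, dst->nents, dir)) {
		sg_free_table(dst);
		kfree(dst);
		return ERR_PTR(-ENOMEM);
	}

	return dst;
}
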
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index fd408995a783..776a3225184c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -43,7 +43,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size,
-			 unsigned alignment, bool mappable)
+			 unsigned alignment, unsigned cache_level,
+			 bool mappable, bool nonblocking)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -78,11 +79,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 	INIT_LIST_HEAD(&unwind_list);
 	if (mappable)
 		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
-					    min_size, alignment, 0,
+					    min_size, alignment, cache_level,
 					    0, dev_priv->mm.gtt_mappable_end);
 	else
 		drm_mm_init_scan(&dev_priv->mm.gtt_space,
-				 min_size, alignment, 0);
+				 min_size, alignment, cache_level);
 
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -90,29 +91,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 			goto found;
 	}
 
-	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		/* Does the object require an outstanding flush? */
-		if (obj->base.write_domain)
-			continue;
-
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
+	if (nonblocking)
+		goto none;
 
-	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
+	/* Now merge in the soon-to-be-expired objects... */
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (!obj->base.write_domain)
-			continue;
-
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
+none:
 	/* Nothing found, clean up and bail out! */
 	while (!list_empty(&unwind_list)) {
 		obj = list_first_entry(&unwind_list,
@@ -163,7 +151,7 @@ found:
 }
 
 int
-i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj, *next;
@@ -171,12 +159,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
-	trace_i915_gem_evict_everything(dev, purgeable_only);
+	trace_i915_gem_evict_everything(dev);
 
 	/* The gpu_idle will flush everything in the write domain to the
 	 * active list. Then we must move everything off the active list
@@ -188,16 +175,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
 	i915_gem_retire_requests(dev);
 
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
 	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list, mm_list) {
-		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-			if (obj->pin_count == 0)
-				WARN_ON(i915_gem_object_unbind(obj));
-		}
-	}
+				 &dev_priv->mm.inactive_list, mm_list)
+		if (obj->pin_count == 0)
+			WARN_ON(i915_gem_object_unbind(obj));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8dd9a6f47db8..3eea143749f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,180 +33,6 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct change_domains {
-	uint32_t invalidate_domains;
-	uint32_t flush_domains;
-	uint32_t flush_rings;
-	uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Mapped to GTT
- *	4. Read by GPU
- *	5. Unmapped from GTT
- *	6. Freed
- *
- *	Let's take these a step at a time
- *
- *	1. Allocated
- *		Pages allocated from the kernel may still have
- *		cache contents, so we set them to (CPU, CPU) always.
- *	2. Written by CPU (using pwrite)
- *		The pwrite function calls set_domain (CPU, CPU) and
- *		this function does nothing (as nothing changes)
- *	3. Mapped by GTT
- *		This function asserts that the object is not
- *		currently in any GPU-based read or write domains
- *	4. Read by GPU
- *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
- *		As write_domain is zero, this function adds in the
- *		current read domains (CPU+COMMAND, 0).
- *		flush_domains is set to CPU.
- *		invalidate_domains is set to COMMAND
- *		clflush is run to get data out of the CPU caches
- *		then i915_dev_set_domain calls i915_gem_flush to
- *		emit an MI_FLUSH and drm_agp_chipset_flush
- *	5. Unmapped from GTT
- *		i915_gem_object_unbind calls set_domain (CPU, CPU)
- *		flush_domains and invalidate_domains end up both zero
- *		so no flushing/invalidating happens
- *	6. Freed
- *		yay, done
- *
- * Case 2: The shared render buffer
- *
- *	1. Allocated
- *	2. Mapped to GTT
- *	3. Read/written by GPU
- *	4. set_domain to (CPU,CPU)
- *	5. Read/written by CPU
- *	6. Read/written by GPU
- *
- *	1. Allocated
- *		Same as last example, (CPU, CPU)
- *	2. Mapped to GTT
- *		Nothing changes (assertions find that it is not in the GPU)
- *	3. Read/written by GPU
- *		execbuffer calls set_domain (RENDER, RENDER)
- *		flush_domains gets CPU
- *		invalidate_domains gets GPU
- *		clflush (obj)
- *		MI_FLUSH and drm_agp_chipset_flush
- *	4. set_domain (CPU, CPU)
- *		flush_domains gets GPU
- *		invalidate_domains gets CPU
- *		wait_rendering (obj) to make sure all drawing is complete.
- *		This will include an MI_FLUSH to get the data from GPU
- *		to memory
- *		clflush (obj) to invalidate the CPU cache
- *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- *	5. Read/written by CPU
- *		cache lines are loaded and dirtied
- *	6. Read written by GPU
- *		Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Read by GPU
- *	4. Updated (written) by CPU again
- *	5. Read by GPU
- *
- *	1. Allocated
- *		(CPU, CPU)
- *	2. Written by CPU
- *		(CPU, CPU)
- *	3. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- *	4. Updated (written) by CPU again
- *		(CPU, CPU)
- *		flush_domains = 0 (no previous write domain)
- *		invalidate_domains = 0 (no new read domains)
- *	5. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
-				  struct intel_ring_buffer *ring,
-				  struct change_domains *cd)
-{
-	uint32_t invalidate_domains = 0, flush_domains = 0;
-
-	/*
-	 * If the object isn't moving to a new write domain,
-	 * let the object stay in multiple read domains
-	 */
-	if (obj->base.pending_write_domain == 0)
-		obj->base.pending_read_domains |= obj->base.read_domains;
-
-	/*
-	 * Flush the current write domain if
-	 * the new read domains don't match. Invalidate
-	 * any read domains which differ from the old
-	 * write domain
-	 */
-	if (obj->base.write_domain &&
-	    (((obj->base.write_domain != obj->base.pending_read_domains ||
-	       obj->ring != ring)) ||
-	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
-		flush_domains |= obj->base.write_domain;
-		invalidate_domains |=
-			obj->base.pending_read_domains & ~obj->base.write_domain;
-	}
-	/*
-	 * Invalidate any read caches which may have
-	 * stale data. That is, any new read domains.
-	 */
-	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
-		i915_gem_clflush_object(obj);
-
-	if (obj->base.pending_write_domain)
-		cd->flips |= atomic_read(&obj->pending_flip);
-
-	/* The actual obj->write_domain will be updated with
-	 * pending_write_domain after we emit the accumulated flush for all
-	 * of our domain changes in execbuffers (which clears objects'
-	 * write_domains).  So if we have a current write domain that we
-	 * aren't changing, set pending_write_domain to that.
-	 */
-	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
-		obj->base.pending_write_domain = obj->base.write_domain;
-
-	cd->invalidate_domains |= invalidate_domains;
-	cd->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(obj->ring);
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(ring);
-}
-
 struct eb_objects {
 	int and;
 	struct hlist_head buckets[0];
@@ -217,6 +43,7 @@ eb_create(int size)
 {
 	struct eb_objects *eb;
 	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+	BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
 	while (count > size)
 		count >>= 1;
 	eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -268,6 +95,7 @@ eb_destroy(struct eb_objects *eb)
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
 	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
 
@@ -382,7 +210,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+							     reloc->offset >> PAGE_SHIFT));
 		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
 		kunmap_atomic(vaddr);
 	} else {
@@ -503,7 +332,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	return ret;
 }
 
-#define  __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
 need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -513,9 +343,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
 }
 
 static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
-		     struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+				   struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
@@ -527,15 +358,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 		obj->tiling_mode != I915_TILING_NONE;
 	need_mappable = need_fence || need_reloc_mappable(obj);
 
-	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
 	if (ret)
 		return ret;
 
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			ret = i915_gem_object_get_fence(obj);
 			if (ret)
-				goto err_unpin;
+				return ret;
 
 			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -544,12 +377,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 		}
 	}
 
+	/* Ensure ppgtt mapping exists if needed */
+	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+				       obj, obj->cache_level);
+
+		obj->has_aliasing_ppgtt_mapping = 1;
+	}
+
 	entry->offset = obj->gtt_offset;
 	return 0;
+}
 
-err_unpin:
-	i915_gem_object_unpin(obj);
-	return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+
+	if (!obj->gtt_space)
+		return;
+
+	entry = obj->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static int
@@ -557,11 +413,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
 			    struct list_head *objects)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	int ret, retry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	struct list_head ordered_objects;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int retry;
 
 	INIT_LIST_HEAD(&ordered_objects);
 	while (!list_empty(objects)) {
@@ -586,6 +441,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
 		obj->base.pending_read_domains = 0;
 		obj->base.pending_write_domain = 0;
+		obj->pending_fenced_gpu_access = false;
 	}
 	list_splice(&ordered_objects, objects);
 
@@ -598,12 +454,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	 * 2.  Bind new objects.
 	 * 3.  Decrement pin count.
 	 *
-	 * This avoid unnecessary unbinding of later objects in order to makr
+	 * This avoids unnecessary unbinding of later objects in order to make
 	 * room for the earlier objects *unless* we need to defragment.
 	 */
 	retry = 0;
 	do {
-		ret = 0;
+		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(obj, objects, exec_list) {
@@ -623,7 +479,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    (need_mappable && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
-				ret = pin_and_fence_object(obj, ring);
+				ret = i915_gem_execbuffer_reserve_object(obj, ring);
 			if (ret)
 				goto err;
 		}
@@ -633,77 +489,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			if (obj->gtt_space)
 				continue;
 
-			ret = pin_and_fence_object(obj, ring);
-			if (ret) {
-				int ret_ignore;
-
-				/* This can potentially raise a harmless
-				 * -EINVAL if we failed to bind in the above
-				 * call. It cannot raise -EINTR since we know
-				 * that the bo is freshly bound and so will
-				 * not need to be flushed or waited upon.
-				 */
-				ret_ignore = i915_gem_object_unbind(obj);
-				(void)ret_ignore;
-				WARN_ON(obj->gtt_space);
-				break;
-			}
+			ret = i915_gem_execbuffer_reserve_object(obj, ring);
+			if (ret)
+				goto err;
 		}
 
-		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry;
-
-			if (!obj->gtt_space)
-				continue;
-
-			entry = obj->exec_entry;
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				i915_gem_object_unpin_fence(obj);
-				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-			}
-
-			i915_gem_object_unpin(obj);
-
-			/* ... and ensure ppgtt mapping exist if needed. */
-			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-						       obj, obj->cache_level);
+err:		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list)
+			i915_gem_execbuffer_unreserve_object(obj);
 
-				obj->has_aliasing_ppgtt_mapping = 1;
-			}
-		}
-
-		if (ret != -ENOSPC || retry > 1)
+		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		/* First attempt, just clear anything that is purgeable.
-		 * Second attempt, clear the entire GTT.
-		 */
-		ret = i915_gem_evict_everything(ring->dev, retry == 0);
+		ret = i915_gem_evict_everything(ring->dev);
 		if (ret)
 			return ret;
-
-		retry++;
 	} while (1);
-
-err:
-	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry;
-
-		if (!obj->gtt_space)
-			continue;
-
-		entry = obj->exec_entry;
-		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-			i915_gem_object_unpin_fence(obj);
-			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-		}
-
-		i915_gem_object_unpin(obj);
-	}
-
-	return ret;
 }
 
 static int
@@ -809,18 +610,6 @@ err:
 	return ret;
 }
 
-static void
-i915_gem_execbuffer_flush(struct drm_device *dev,
-			  uint32_t invalidate_domains,
-			  uint32_t flush_domains)
-{
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		intel_gtt_chipset_flush();
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
-}
-
 static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
@@ -853,48 +642,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 	return 0;
 }
 
-
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	struct change_domains cd;
+	uint32_t flush_domains = 0;
+	uint32_t flips = 0;
 	int ret;
 
-	memset(&cd, 0, sizeof(cd));
-	list_for_each_entry(obj, objects, exec_list)
-		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
-
-	if (cd.invalidate_domains | cd.flush_domains) {
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains);
-	}
-
-	if (cd.flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+	list_for_each_entry(obj, objects, exec_list) {
+		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
+
+		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+			i915_gem_clflush_object(obj);
+
+		if (obj->base.pending_write_domain)
+			flips |= atomic_read(&obj->pending_flip);
+
+		flush_domains |= obj->base.write_domain;
 	}
 
-	list_for_each_entry(obj, objects, exec_list) {
-		ret = i915_gem_object_sync(obj, ring);
+	if (flips) {
+		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
 		if (ret)
 			return ret;
 	}
 
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		intel_gtt_chipset_flush();
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	ret = i915_gem_flush_ring(ring,
-				  I915_GEM_GPU_DOMAINS,
-				  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
-	if (ret)
-		return ret;
-
-	ring->gpu_caches_dirty = false;
-	return 0;
+	return intel_ring_invalidate_all_caches(ring);
 }
 
 static bool
@@ -942,9 +728,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 	struct drm_i915_gem_object *obj;
 
 	list_for_each_entry(obj, objects, exec_list) {
-		  u32 old_read = obj->base.read_domains;
-		  u32 old_write = obj->base.write_domain;
-
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
 
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->base.write_domain = obj->base.pending_write_domain;
@@ -953,17 +738,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		i915_gem_object_move_to_active(obj, ring, seqno);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->pending_gpu_write = true;
-			list_move_tail(&obj->gpu_write_list,
-				       &ring->gpu_write_list);
+			obj->last_write_seqno = seqno;
 			if (obj->pin_count) /* check for potential scanout */
-				intel_mark_busy(ring->dev, obj);
+				intel_mark_fb_busy(obj);
 		}
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
-
-	intel_mark_busy(ring->dev, NULL);
 }
 
 static void
@@ -971,16 +752,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct drm_file *file,
 				    struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_request *request;
-
 	/* Unconditionally force add_request to emit a full flush. */
 	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL || i915_add_request(ring, file, request)) {
-		kfree(request);
-	}
+	(void)i915_add_request(ring, file, NULL);
 }
 
 static int
@@ -1326,8 +1102,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -ENOMEM;
 	}
 	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
+			     (void __user *)(uintptr_t)args->buffers_ptr,
 			     sizeof(*exec_list) * args->buffer_count);
 	if (ret != 0) {
 		DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1366,8 +1141,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		for (i = 0; i < args->buffer_count; i++)
 			exec_list[i].offset = exec2_list[i].offset;
 		/* ... and back out to userspace */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
+		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
 				   exec_list,
 				   sizeof(*exec_list) * args->buffer_count);
 		if (ret) {
@@ -1421,8 +1195,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
+		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
 				   exec2_list,
 				   sizeof(*exec2_list) * args->buffer_count);
 		if (ret) {
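
One detail worth pulling out of the execbuffer changes: eb_create() (context above, now with a BUILD_BUG_ON) sizes its hash buckets to the largest power of two that fits in a page, then halves the count until it no longer exceeds the expected object count. A self-contained sketch of that sizing, with demo_* names standing in for the driver's own types:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct demo_eb {
	int and;			/* bucket mask, i.e. count - 1 */
	struct hlist_head buckets[0];
};

static struct demo_eb *demo_eb_create(int size)
{
	struct demo_eb *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;

	/* The halving below only preserves power-of-two counts if the
	 * starting point is one. */
	BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));

	while (count > size)
		count >>= 1;

	eb = kzalloc(count * sizeof(struct hlist_head) +
		     sizeof(struct demo_eb), GFP_KERNEL);
	if (!eb)
		return NULL;

	eb->and = count - 1;	/* hash & eb->and indexes a bucket */
	return eb;
}
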
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 69261acb94b3..df470b5e8d36 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -166,8 +166,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 }
 
 static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
-					 struct scatterlist *sg_list,
-					 unsigned sg_len,
+					 const struct sg_table *pages,
 					 unsigned first_entry,
 					 uint32_t pte_flags)
 {
@@ -179,12 +178,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 	struct scatterlist *sg;
 
 	/* init sg walking */
-	sg = sg_list;
+	sg = pages->sgl;
 	i = 0;
 	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
 	m = 0;
 
-	while (i < sg_len) {
+	while (i < pages->nents) {
 		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
 
 		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -193,13 +192,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 			pt_vaddr[j] = pte | pte_flags;
 
 			/* grab the next page */
-			m++;
-			if (m == segment_len) {
-				sg = sg_next(sg);
-				i++;
-				if (i == sg_len)
+			if (++m == segment_len) {
+				if (++i == pages->nents)
 					break;
 
+				sg = sg_next(sg);
 				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
 				m = 0;
 			}
@@ -212,44 +209,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 	}
 }
 
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
-				    unsigned first_entry, unsigned num_entries,
-				    struct page **pages, uint32_t pte_flags)
-{
-	uint32_t *pt_vaddr, pte;
-	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
-	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
-	unsigned last_pte, i;
-	dma_addr_t page_addr;
-
-	while (num_entries) {
-		last_pte = first_pte + num_entries;
-		last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
-		for (i = first_pte; i < last_pte; i++) {
-			page_addr = page_to_phys(*pages);
-			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
-			pt_vaddr[i] = pte | pte_flags;
-
-			pages++;
-		}
-
-		kunmap_atomic(pt_vaddr);
-
-		num_entries -= last_pte - first_pte;
-		first_pte = 0;
-		act_pd++;
-	}
-}
-
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
 			    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t pte_flags = GEN6_PTE_VALID;
 
 	switch (cache_level) {
@@ -260,7 +223,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		pte_flags |= GEN6_PTE_CACHE_LLC;
 		break;
 	case I915_CACHE_NONE:
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(obj->base.dev))
 			pte_flags |= HSW_PTE_UNCACHED;
 		else
 			pte_flags |= GEN6_PTE_UNCACHED;
@@ -269,26 +232,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		BUG();
 	}
 
-	if (obj->sg_table) {
-		i915_ppgtt_insert_sg_entries(ppgtt,
-					     obj->sg_table->sgl,
-					     obj->sg_table->nents,
-					     obj->gtt_space->start >> PAGE_SHIFT,
-					     pte_flags);
-	} else if (dev_priv->mm.gtt->needs_dmar) {
-		BUG_ON(!obj->sg_list);
-
-		i915_ppgtt_insert_sg_entries(ppgtt,
-					     obj->sg_list,
-					     obj->num_sg,
-					     obj->gtt_space->start >> PAGE_SHIFT,
-					     pte_flags);
-	} else
-		i915_ppgtt_insert_pages(ppgtt,
-					obj->gtt_space->start >> PAGE_SHIFT,
-					obj->base.size >> PAGE_SHIFT,
-					obj->pages,
-					pte_flags);
+	i915_ppgtt_insert_sg_entries(ppgtt,
+				     obj->pages,
+				     obj->gtt_space->start >> PAGE_SHIFT,
+				     pte_flags);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -350,7 +297,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		i915_gem_clflush_object(obj);
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
@@ -360,44 +307,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* don't map imported dma buf objects */
-	if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
-		return intel_gtt_map_memory(obj->pages,
-					    obj->base.size >> PAGE_SHIFT,
-					    &obj->sg_list,
-					    &obj->num_sg);
-	else
+	if (obj->has_dma_mapping)
 		return 0;
+
+	if (!dma_map_sg(&obj->base.dev->pdev->dev,
+			obj->pages->sgl, obj->pages->nents,
+			PCI_DMA_BIDIRECTIONAL))
+		return -ENOSPC;
+
+	return 0;
 }
 
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
 
-	if (obj->sg_table) {
-		intel_gtt_insert_sg_entries(obj->sg_table->sgl,
-					    obj->sg_table->nents,
-					    obj->gtt_space->start >> PAGE_SHIFT,
-					    agp_type);
-	} else if (dev_priv->mm.gtt->needs_dmar) {
-		BUG_ON(!obj->sg_list);
-
-		intel_gtt_insert_sg_entries(obj->sg_list,
-					    obj->num_sg,
-					    obj->gtt_space->start >> PAGE_SHIFT,
-					    agp_type);
-	} else
-		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-				       obj->base.size >> PAGE_SHIFT,
-				       obj->pages,
-				       agp_type);
-
+	intel_gtt_insert_sg_entries(obj->pages,
+				    obj->gtt_space->start >> PAGE_SHIFT,
+				    agp_type);
 	obj->has_global_gtt_mapping = 1;
 }
 
@@ -417,14 +346,31 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 
 	interruptible = do_idling(dev_priv);
 
-	if (obj->sg_list) {
-		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
-		obj->sg_list = NULL;
-	}
+	if (!obj->has_dma_mapping)
+		dma_unmap_sg(&dev->pdev->dev,
+			     obj->pages->sgl, obj->pages->nents,
+			     PCI_DMA_BIDIRECTIONAL);
 
 	undo_idling(dev_priv, interruptible);
 }
 
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+				  unsigned long color,
+				  unsigned long *start,
+				  unsigned long *end)
+{
+	if (node->color != color)
+		*start += 4096;
+
+	if (!list_empty(&node->node_list)) {
+		node = list_entry(node->node_list.next,
+				  struct drm_mm_node,
+				  node_list);
+		if (node->allocated && node->color != color)
+			*end -= 4096;
+	}
+}
+
 void i915_gem_init_global_gtt(struct drm_device *dev,
 			      unsigned long start,
 			      unsigned long mappable_end,
@@ -434,6 +380,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 
 	/* Substract the guard page ... */
 	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+	if (!HAS_LLC(dev))
+		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
 	dev_priv->mm.gtt_start = start;
 	dev_priv->mm.gtt_mappable_end = mappable_end;
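
i915_ppgtt_insert_sg_entries() above walks a DMA-mapped sg_table one page at a time, advancing to the next scatterlist entry whenever the current segment's pages are exhausted. A stripped-down sketch of that traversal, assuming every entry maps at least one whole page; for_each_gtt_page() and the visit() callback are illustrative names, with visit() standing in for writing one PTE:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static void for_each_gtt_page(const struct sg_table *pages,
			      void (*visit)(dma_addr_t addr, void *ctx),
			      void *ctx)
{
	struct scatterlist *sg = pages->sgl;
	unsigned int i = 0, m = 0;
	unsigned int segment_len = sg_dma_len(sg) >> PAGE_SHIFT;

	while (i < pages->nents) {
		/* One call per page-sized chunk of the mapped segment. */
		visit(sg_dma_address(sg) + ((dma_addr_t)m << PAGE_SHIFT), ctx);

		if (++m == segment_len) {
			if (++i == pages->nents)
				break;
			sg = sg_next(sg);
			segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
			m = 0;
		}
	}
}
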
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c2b7b67e410d..3208650a235c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -469,18 +469,20 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
-	for (i = 0; i < page_count; i++) {
-		char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(obj->pages[i]);
-			set_page_dirty(obj->pages[i]);
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
 		}
 	}
 }
@@ -488,6 +490,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -501,8 +504,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	for (i = 0; i < page_count; i++) {
-		if (page_to_phys(obj->pages[i]) & (1 << 17))
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
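
The tiling hunks above drop the struct page **array and instead pull each page back out of the object's sg_table with sg_page(). A small sketch of that bookkeeping, recording physical-address bit 17 per page into a bitmap; record_bit17() is a made-up helper and, like the code above, it assumes one page per scatterlist entry:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static unsigned long *record_bit17(struct sg_table *st, int page_count)
{
	unsigned long *bits;
	struct scatterlist *sg;
	int i;

	bits = kcalloc(BITS_TO_LONGS(page_count), sizeof(long), GFP_KERNEL);
	if (!bits)
		return NULL;

	/* One bit per page: set when bit 17 of the physical address is set. */
	for_each_sg(st->sgl, sg, page_count, i)
		if (page_to_phys(sg_page(sg)) & (1 << 17))
			__set_bit(i, bits);

	return bits;
}
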
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 505357886bbb..4e9888388c0c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -295,11 +295,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_helper_hpd_irq_event(dev);
 }
 
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
-	u8 new_delay = dev_priv->cur_delay;
+	u8 new_delay;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mchdev_lock, flags);
+
+	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+	new_delay = dev_priv->ips.cur_delay;
 
 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
 	busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -309,19 +319,21 @@ static void i915_handle_rps_change(struct drm_device *dev)
 
 	/* Handle RCS change request from hw */
 	if (busy_up > max_avg) {
-		if (dev_priv->cur_delay != dev_priv->max_delay)
-			new_delay = dev_priv->cur_delay - 1;
-		if (new_delay < dev_priv->max_delay)
-			new_delay = dev_priv->max_delay;
+		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.cur_delay - 1;
+		if (new_delay < dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.max_delay;
 	} else if (busy_down < min_avg) {
-		if (dev_priv->cur_delay != dev_priv->min_delay)
-			new_delay = dev_priv->cur_delay + 1;
-		if (new_delay > dev_priv->min_delay)
-			new_delay = dev_priv->min_delay;
+		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.cur_delay + 1;
+		if (new_delay > dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.min_delay;
 	}
 
 	if (ironlake_set_drps(dev, new_delay))
-		dev_priv->cur_delay = new_delay;
+		dev_priv->ips.cur_delay = new_delay;
+
+	spin_unlock_irqrestore(&mchdev_lock, flags);
 
 	return;
 }
@@ -334,7 +346,7 @@ static void notify_ring(struct drm_device *dev,
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
+	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
 	wake_up_all(&ring->irq_queue);
 	if (i915_enable_hangcheck) {
@@ -348,16 +360,16 @@ static void notify_ring(struct drm_device *dev,
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps_work);
+						    rps.work);
 	u32 pm_iir, pm_imr;
 	u8 new_delay;
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	pm_iir = dev_priv->pm_iir;
-	dev_priv->pm_iir = 0;
+	spin_lock_irq(&dev_priv->rps.lock);
+	pm_iir = dev_priv->rps.pm_iir;
+	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
@@ -365,11 +377,17 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
-		new_delay = dev_priv->cur_delay + 1;
+		new_delay = dev_priv->rps.cur_delay + 1;
 	else
-		new_delay = dev_priv->cur_delay - 1;
+		new_delay = dev_priv->rps.cur_delay - 1;
 
-	gen6_set_rps(dev_priv->dev, new_delay);
+	/* sysfs frequency interfaces may have snuck in while servicing the
+	 * interrupt
+	 */
+	if (!(new_delay > dev_priv->rps.max_delay ||
+	      new_delay < dev_priv->rps.min_delay)) {
+		gen6_set_rps(dev_priv->dev, new_delay);
+	}
 
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
@@ -443,7 +461,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long flags;
 
-	if (!IS_IVYBRIDGE(dev))
+	if (!HAS_L3_GPU_CACHE(dev))
 		return;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -487,19 +505,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 	 * IIR bits should never already be set because IMR should
 	 * prevent an interrupt from being shown in IIR. The warning
 	 * displays a case where we've unsafely cleared
-	 * dev_priv->pm_iir. Although missing an interrupt of the same
+	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
 	 * type is not a problem, it displays a problem in the logic.
 	 *
-	 * The mask bit in IMR is cleared by rps_work.
+	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock_irqsave(&dev_priv->rps_lock, flags);
-	dev_priv->pm_iir |= pm_iir;
-	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	dev_priv->rps.pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-	queue_work(dev_priv->wq, &dev_priv->rps_work);
+	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -792,10 +810,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 			ibx_irq_handler(dev, pch_iir);
 	}
 
-	if (de_iir & DE_PCU_EVENT) {
-		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
-		i915_handle_rps_change(dev);
-	}
+	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+		ironlake_handle_rps_change(dev);
 
 	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
 		gen6_queue_rps_work(dev_priv, pm_iir);
@@ -842,26 +858,55 @@ static void i915_error_work_func(struct work_struct *work)
 	}
 }
 
+/* NB: please notice the memset */
+static void i915_get_extra_instdone(struct drm_device *dev,
+				    uint32_t *instdone)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+	case 3:
+		instdone[0] = I915_READ(INSTDONE);
+		break;
+	case 4:
+	case 5:
+	case 6:
+		instdone[0] = I915_READ(INSTDONE_I965);
+		instdone[1] = I915_READ(INSTDONE1);
+		break;
+	default:
+		WARN_ONCE(1, "Unsupported platform\n");
+	case 7:
+		instdone[0] = I915_READ(GEN7_INSTDONE_1);
+		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+		break;
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_i915_private *dev_priv,
 			 struct drm_i915_gem_object *src)
 {
 	struct drm_i915_error_object *dst;
-	int page, page_count;
+	int i, count;
 	u32 reloc_offset;
 
 	if (src == NULL || src->pages == NULL)
 		return NULL;
 
-	page_count = src->base.size / PAGE_SIZE;
+	count = src->base.size / PAGE_SIZE;
 
-	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
 	if (dst == NULL)
 		return NULL;
 
 	reloc_offset = src->gtt_offset;
-	for (page = 0; page < page_count; page++) {
+	for (i = 0; i < count; i++) {
 		unsigned long flags;
 		void *d;
 
@@ -884,30 +929,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
 		} else {
+			struct page *page;
 			void *s;
 
-			drm_clflush_pages(&src->pages[page], 1);
+			page = i915_gem_object_get_page(src, i);
+
+			drm_clflush_pages(&page, 1);
 
-			s = kmap_atomic(src->pages[page]);
+			s = kmap_atomic(page);
 			memcpy(d, s, PAGE_SIZE);
 			kunmap_atomic(s);
 
-			drm_clflush_pages(&src->pages[page], 1);
+			drm_clflush_pages(&page, 1);
 		}
 		local_irq_restore(flags);
 
-		dst->pages[page] = d;
+		dst->pages[i] = d;
 
 		reloc_offset += PAGE_SIZE;
 	}
-	dst->page_count = page_count;
+	dst->page_count = count;
 	dst->gtt_offset = src->gtt_offset;
 
 	return dst;
 
 unwind:
-	while (page--)
-		kfree(dst->pages[page]);
+	while (i--)
+		kfree(dst->pages[i]);
 	kfree(dst);
 	return NULL;
 }
@@ -948,7 +996,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 {
 	err->size = obj->base.size;
 	err->name = obj->base.name;
-	err->seqno = obj->last_rendering_seqno;
+	err->rseqno = obj->last_read_seqno;
+	err->wseqno = obj->last_write_seqno;
 	err->gtt_offset = obj->gtt_offset;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
@@ -1038,12 +1087,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 	if (!ring->get_seqno)
 		return NULL;
 
-	seqno = ring->get_seqno(ring);
+	seqno = ring->get_seqno(ring, false);
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		if (obj->ring != ring)
 			continue;
 
-		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (i915_seqno_passed(seqno, obj->last_read_seqno))
 			continue;
 
 		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1079,10 +1128,8 @@ static void i915_record_ring_state(struct drm_device *dev,
 		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
 		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-		if (ring->id == RCS) {
-			error->instdone1 = I915_READ(INSTDONE1);
+		if (ring->id == RCS)
 			error->bbaddr = I915_READ64(BB_ADDR);
-		}
 	} else {
 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
 		error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -1092,7 +1139,7 @@ static void i915_record_ring_state(struct drm_device *dev,
 
 	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
 	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
-	error->seqno[ring->id] = ring->get_seqno(ring);
+	error->seqno[ring->id] = ring->get_seqno(ring, false);
 	error->acthd[ring->id] = intel_ring_get_active_head(ring);
 	error->head[ring->id] = I915_READ_HEAD(ring);
 	error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1198,6 +1245,11 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->done_reg = I915_READ(DONE_REG);
 	}
 
+	if (INTEL_INFO(dev)->gen == 7)
+		error->err_int = I915_READ(GEN7_ERR_INT);
+
+	i915_get_extra_instdone(dev, error->extra_instdone);
+
 	i915_gem_record_fences(dev, error);
 	i915_gem_record_rings(dev, error);
 
@@ -1209,7 +1261,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
 		i++;
 	error->active_bo_count = i;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
 		if (obj->pin_count)
 			i++;
 	error->pinned_bo_count = i - error->active_bo_count;
@@ -1234,7 +1286,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->pinned_bo_count =
 			capture_pinned_bo(error->pinned_bo,
 					  error->pinned_bo_count,
-					  &dev_priv->mm.gtt_list);
+					  &dev_priv->mm.bound_list);
 
 	do_gettimeofday(&error->time);
 
@@ -1273,24 +1325,26 @@ void i915_destroy_error_state(struct drm_device *dev)
 static void i915_report_and_clear_eir(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t instdone[I915_NUM_INSTDONE_REG];
 	u32 eir = I915_READ(EIR);
-	int pipe;
+	int pipe, i;
 
 	if (!eir)
 		return;
 
 	pr_err("render error detected, EIR: 0x%08x\n", eir);
 
+	i915_get_extra_instdone(dev, instdone);
+
 	if (IS_G4X(dev)) {
 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
-			pr_err("  INSTDONE: 0x%08x\n",
-			       I915_READ(INSTDONE_I965));
+			for (i = 0; i < ARRAY_SIZE(instdone); i++)
+				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
-			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
 			POSTING_READ(IPEIR_I965);
@@ -1324,12 +1378,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 	if (eir & I915_ERROR_INSTRUCTION) {
 		pr_err("instruction error\n");
 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
+		for (i = 0; i < ARRAY_SIZE(instdone); i++)
+			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
 		if (INTEL_INFO(dev)->gen < 4) {
 			u32 ipeir = I915_READ(IPEIR);
 
 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
-			pr_err("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
 			I915_WRITE(IPEIR, ipeir);
 			POSTING_READ(IPEIR);
@@ -1338,10 +1393,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 
 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
-			pr_err("  INSTDONE: 0x%08x\n",
-			       I915_READ(INSTDONE_I965));
 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
-			pr_err("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
 			POSTING_READ(IPEIR_I965);
@@ -1589,7 +1641,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
 {
 	if (list_empty(&ring->request_list) ||
-	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+	    i915_seqno_passed(ring->get_seqno(ring, false),
+			      ring_last_seqno(ring))) {
 		/* Issue a wake-up to catch stuck h/w. */
 		if (waitqueue_active(&ring->irq_queue)) {
 			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -1655,7 +1708,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *)data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
 	struct intel_ring_buffer *ring;
 	bool err = false, idle;
 	int i;
@@ -1683,25 +1736,16 @@ void i915_hangcheck_elapsed(unsigned long data)
 		return;
 	}
 
-	if (INTEL_INFO(dev)->gen < 4) {
-		instdone = I915_READ(INSTDONE);
-		instdone1 = 0;
-	} else {
-		instdone = I915_READ(INSTDONE_I965);
-		instdone1 = I915_READ(INSTDONE1);
-	}
-
+	i915_get_extra_instdone(dev, instdone);
 	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
-	    dev_priv->last_instdone == instdone &&
-	    dev_priv->last_instdone1 == instdone1) {
+	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
 		if (i915_hangcheck_hung(dev))
 			return;
 	} else {
 		dev_priv->hangcheck_count = 0;
 
 		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
-		dev_priv->last_instdone = instdone;
-		dev_priv->last_instdone1 = instdone1;
+		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
 	}
 
 repeat:
@@ -2646,7 +2690,7 @@ void intel_irq_init(struct drm_device *dev)
 
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
-	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
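The hangcheck rework above replaces the two scalar INSTDONE snapshots with an array filled by i915_get_extra_instdone() and compares whole snapshots with memcmp(). A minimal standalone sketch of that detection idea, with made-up register values standing in for the real MMIO reads:

/* Sketch of the snapshot-and-compare hangcheck above: if neither the ACTHD
 * values nor any INSTDONE register moved since the last timer tick, the GPU
 * is treated as making no progress.  The array sizes and values here are
 * illustrative stand-ins, not the driver's. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NUM_RINGS		3
#define NUM_INSTDONE_REG	4

struct hangcheck_state {
	uint32_t last_acthd[NUM_RINGS];
	uint32_t prev_instdone[NUM_INSTDONE_REG];
	int hangcheck_count;
};

static int hangcheck_tick(struct hangcheck_state *s,
			  const uint32_t acthd[NUM_RINGS],
			  const uint32_t instdone[NUM_INSTDONE_REG])
{
	if (memcmp(s->last_acthd, acthd, sizeof(s->last_acthd)) == 0 &&
	    memcmp(s->prev_instdone, instdone, sizeof(s->prev_instdone)) == 0)
		return ++s->hangcheck_count;	/* no progress observed */

	s->hangcheck_count = 0;
	memcpy(s->last_acthd, acthd, sizeof(s->last_acthd));
	memcpy(s->prev_instdone, instdone, sizeof(s->prev_instdone));
	return 0;
}

int main(void)
{
	struct hangcheck_state s = { {0}, {0}, 0 };
	uint32_t acthd[NUM_RINGS] = { 0x100, 0x200, 0x300 };
	uint32_t instdone[NUM_INSTDONE_REG] = { ~0u, ~0u, ~0u, ~0u };

	hangcheck_tick(&s, acthd, instdone);	/* first tick records the state */
	printf("stuck ticks: %d\n", hangcheck_tick(&s, acthd, instdone));
	return 0;
}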
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 28725ce5b82c..7637824c6a7d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,7 @@
 #define RING_ACTHD(base)	((base)+0x74)
 #define RING_NOPID(base)	((base)+0x94)
 #define RING_IMR(base)		((base)+0xa8)
+#define RING_TIMESTAMP(base)	((base)+0x358)
 #define   TAIL_ADDR		0x001FFFF8
 #define   HEAD_WRAP_COUNT	0xFFE00000
 #define   HEAD_WRAP_ONE		0x00200000
@@ -478,6 +479,11 @@
 #define IPEIR_I965	0x02064
 #define IPEHR_I965	0x02068
 #define INSTDONE_I965	0x0206c
+#define GEN7_INSTDONE_1		0x0206c
+#define GEN7_SC_INSTDONE	0x07100
+#define GEN7_SAMPLER_INSTDONE	0x0e160
+#define GEN7_ROW_INSTDONE	0x0e164
+#define I915_NUM_INSTDONE_REG	4
 #define RING_IPEIR(base)	((base)+0x64)
 #define RING_IPEHR(base)	((base)+0x68)
 #define RING_INSTDONE(base)	((base)+0x6c)
@@ -500,6 +506,8 @@
 #define DMA_FADD_I8XX	0x020d0
 
 #define ERROR_GEN6	0x040a0
+#define GEN7_ERR_INT	0x44040
+#define   ERR_INT_MMIO_UNCLAIMED (1<<13)
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior.  The top 16 bits of each are
@@ -529,6 +537,8 @@
 #define   GFX_PSMI_GRANULARITY		(1<<10)
 #define   GFX_PPGTT_ENABLE		(1<<9)
 
+#define VLV_DISPLAY_BASE 0x180000
+
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
@@ -1496,6 +1506,14 @@
 					 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
 					 GEN7_CXT_GT1_SIZE(ctx_reg) + \
 					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+#define HSW_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 26) & 0x3f)
+#define HSW_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 23) & 0x7)
+#define HSW_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 15) & 0xff)
+#define HSW_CXT_TOTAL_SIZE(ctx_reg)	(HSW_CXT_POWER_SIZE(ctx_reg) + \
+					 HSW_CXT_RING_SIZE(ctx_reg) + \
+					 HSW_CXT_RENDER_SIZE(ctx_reg) + \
+					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
 
 /*
  * Overlay regs
@@ -1549,12 +1567,35 @@
 
 /* VGA port control */
 #define ADPA			0x61100
+#define PCH_ADPA                0xe1100
+#define VLV_ADPA		(VLV_DISPLAY_BASE + ADPA)
+
 #define   ADPA_DAC_ENABLE	(1<<31)
 #define   ADPA_DAC_DISABLE	0
 #define   ADPA_PIPE_SELECT_MASK	(1<<30)
 #define   ADPA_PIPE_A_SELECT	0
 #define   ADPA_PIPE_B_SELECT	(1<<30)
 #define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define   ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bit 25-16 */
+#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
+#define   ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
+#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
+#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
+#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
+#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
+#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
+#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
+#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 #define   ADPA_USE_VGA_HVPOLARITY (1<<15)
 #define   ADPA_SETS_HVPOLARITY	0
 #define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1753,6 +1794,10 @@
 
 /* Video Data Island Packet control */
 #define VIDEO_DIP_DATA		0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define   VIDEO_DIP_DATA_SIZE	32
 #define VIDEO_DIP_CTL		0x61170
 /* Pre HSW: */
 #define   VIDEO_DIP_ENABLE		(1 << 31)
@@ -3889,31 +3934,6 @@
 #define FDI_PLL_CTL_1           0xfe000
 #define FDI_PLL_CTL_2           0xfe004
 
-/* CRT */
-#define PCH_ADPA                0xe1100
-#define  ADPA_TRANS_SELECT_MASK (1<<30)
-#define  ADPA_TRANS_A_SELECT    0
-#define  ADPA_TRANS_B_SELECT    (1<<30)
-#define  ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bit 25-16 */
-#define  ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define  ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
-#define  ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
-#define  ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
-#define  ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
-#define  ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
-#define  ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
-#define  ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
-#define  ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
-#define  ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
-#define  ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
-#define  ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
-#define  ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
@@ -4021,6 +4041,8 @@
 #define  PORT_TRANS_C_SEL_CPT	(2<<29)
 #define  PORT_TRANS_SEL_MASK	(3<<29)
 #define  PORT_TRANS_SEL_CPT(pipe)	((pipe) << 29)
+#define  PORT_TO_PIPE(val)	(((val) & (1<<30)) >> 30)
+#define  PORT_TO_PIPE_CPT(val)	(((val) & PORT_TRANS_SEL_MASK) >> 29)
 
 #define TRANS_DP_CTL_A		0xe0300
 #define TRANS_DP_CTL_B		0xe1300
@@ -4239,7 +4261,15 @@
 #define G4X_HDMIW_HDMIEDID		0x6210C
 
 #define IBX_HDMIW_HDMIEDID_A		0xE2050
+#define IBX_HDMIW_HDMIEDID_B		0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					IBX_HDMIW_HDMIEDID_A, \
+					IBX_HDMIW_HDMIEDID_B)
 #define IBX_AUD_CNTL_ST_A		0xE20B4
+#define IBX_AUD_CNTL_ST_B		0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					IBX_AUD_CNTL_ST_A, \
+					IBX_AUD_CNTL_ST_B)
 #define IBX_ELD_BUFFER_SIZE		(0x1f << 10)
 #define IBX_ELD_ADDRESS			(0x1f << 5)
 #define IBX_ELD_ACK			(1 << 4)
@@ -4248,7 +4278,15 @@
 #define IBX_CP_READYB			(1 << 1)
 
 #define CPT_HDMIW_HDMIEDID_A		0xE5050
+#define CPT_HDMIW_HDMIEDID_B		0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					CPT_HDMIW_HDMIEDID_A, \
+					CPT_HDMIW_HDMIEDID_B)
 #define CPT_AUD_CNTL_ST_A		0xE50B4
+#define CPT_AUD_CNTL_ST_B		0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					CPT_AUD_CNTL_ST_A, \
+					CPT_AUD_CNTL_ST_B)
 #define CPT_AUD_CNTRL_ST2		0xE50C0
 
 /* These are the 4 32-bit write offset registers for each stream
@@ -4258,7 +4296,15 @@
 #define GEN7_SO_WRITE_OFFSET(n)		(0x5280 + (n) * 4)
 
 #define IBX_AUD_CONFIG_A			0xe2000
+#define IBX_AUD_CONFIG_B			0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+					IBX_AUD_CONFIG_A, \
+					IBX_AUD_CONFIG_B)
 #define CPT_AUD_CONFIG_A			0xe5000
+#define CPT_AUD_CONFIG_B			0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+					CPT_AUD_CONFIG_A, \
+					CPT_AUD_CONFIG_B)
 #define   AUD_CONFIG_N_VALUE_INDEX		(1 << 29)
 #define   AUD_CONFIG_N_PROG_ENABLE		(1 << 28)
 #define   AUD_CONFIG_UPPER_N_SHIFT		20
@@ -4269,195 +4315,233 @@
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI		(0xf << 16)
 #define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
 
+/* HSW Audio */
+#define   HSW_AUD_CONFIG_A		0x65000 /* Audio Configuration Transcoder A */
+#define   HSW_AUD_CONFIG_B		0x65100 /* Audio Configuration Transcoder B */
+#define   HSW_AUD_CFG(pipe) _PIPE(pipe, \
+					HSW_AUD_CONFIG_A, \
+					HSW_AUD_CONFIG_B)
+
+#define   HSW_AUD_MISC_CTRL_A		0x65010 /* Audio Misc Control Convert 1 */
+#define   HSW_AUD_MISC_CTRL_B		0x65110 /* Audio Misc Control Convert 2 */
+#define   HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+					HSW_AUD_MISC_CTRL_A, \
+					HSW_AUD_MISC_CTRL_B)
+
+#define   HSW_AUD_DIP_ELD_CTRL_ST_A	0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define   HSW_AUD_DIP_ELD_CTRL_ST_B	0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define   HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+					HSW_AUD_DIP_ELD_CTRL_ST_A, \
+					HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define   HSW_AUD_DIG_CNVT_1		0x65080 /* Audio Converter 1 */
+#define   HSW_AUD_DIG_CNVT_2		0x65180 /* Audio Converter 2 */
+#define   AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+					HSW_AUD_DIG_CNVT_1, \
+					HSW_AUD_DIG_CNVT_2)
+#define   DIP_PORT_SEL_MASK		0x3
+
+#define   HSW_AUD_EDID_DATA_A		0x65050
+#define   HSW_AUD_EDID_DATA_B		0x65150
+#define   HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+					HSW_AUD_EDID_DATA_A, \
+					HSW_AUD_EDID_DATA_B)
+
+#define   HSW_AUD_PIPE_CONV_CFG		0x6507c /* Audio pipe and converter configs */
+#define   HSW_AUD_PIN_ELD_CP_VLD	0x650c0 /* Audio ELD and CP Ready Status */
+#define   AUDIO_INACTIVE_C		(1<<11)
+#define   AUDIO_INACTIVE_B		(1<<7)
+#define   AUDIO_INACTIVE_A		(1<<3)
+#define   AUDIO_OUTPUT_ENABLE_A		(1<<2)
+#define   AUDIO_OUTPUT_ENABLE_B		(1<<6)
+#define   AUDIO_OUTPUT_ENABLE_C		(1<<10)
+#define   AUDIO_ELD_VALID_A		(1<<0)
+#define   AUDIO_ELD_VALID_B		(1<<4)
+#define   AUDIO_ELD_VALID_C		(1<<8)
+#define   AUDIO_CP_READY_A		(1<<1)
+#define   AUDIO_CP_READY_B		(1<<5)
+#define   AUDIO_CP_READY_C		(1<<9)
+
 /* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1		0x45400		/* BIOS */
-#define HSW_PWR_WELL_CTL2		0x45404		/* Driver */
-#define HSW_PWR_WELL_CTL3		0x45408		/* KVMR */
-#define HSW_PWR_WELL_CTL4		0x4540C		/* Debug */
-#define   HSW_PWR_WELL_ENABLE				(1<<31)
-#define   HSW_PWR_WELL_STATE				(1<<30)
-#define HSW_PWR_WELL_CTL5		0x45410
+#define HSW_PWR_WELL_CTL1			0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2			0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3			0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4			0x4540C /* Debug */
+#define   HSW_PWR_WELL_ENABLE			(1<<31)
+#define   HSW_PWR_WELL_STATE			(1<<30)
+#define HSW_PWR_WELL_CTL5			0x45410
 #define   HSW_PWR_WELL_ENABLE_SINGLE_STEP	(1<<31)
 #define   HSW_PWR_WELL_PWR_GATE_OVERRIDE	(1<<20)
-#define   HSW_PWR_WELL_FORCE_ON				(1<<19)
-#define HSW_PWR_WELL_CTL6		0x45414
+#define   HSW_PWR_WELL_FORCE_ON			(1<<19)
+#define HSW_PWR_WELL_CTL6			0x45414
 
 /* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A			0x60400
-#define PIPE_DDI_FUNC_CTL_B			0x61400
-#define PIPE_DDI_FUNC_CTL_C			0x62400
+#define PIPE_DDI_FUNC_CTL_A		0x60400
+#define PIPE_DDI_FUNC_CTL_B		0x61400
+#define PIPE_DDI_FUNC_CTL_C		0x62400
 #define PIPE_DDI_FUNC_CTL_EDP		0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
-					PIPE_DDI_FUNC_CTL_A, \
-					PIPE_DDI_FUNC_CTL_B)
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
+				       PIPE_DDI_FUNC_CTL_B)
 #define  PIPE_DDI_FUNC_ENABLE		(1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define  PIPE_DDI_PORT_MASK			(7<<28)
-#define  PIPE_DDI_SELECT_PORT(x)		((x)<<28)
-#define  PIPE_DDI_MODE_SELECT_HDMI		(0<<24)
-#define  PIPE_DDI_MODE_SELECT_DVI		(1<<24)
+#define  PIPE_DDI_PORT_MASK		(7<<28)
+#define  PIPE_DDI_SELECT_PORT(x)	((x)<<28)
+#define  PIPE_DDI_MODE_SELECT_MASK	(7<<24)
+#define  PIPE_DDI_MODE_SELECT_HDMI	(0<<24)
+#define  PIPE_DDI_MODE_SELECT_DVI	(1<<24)
 #define  PIPE_DDI_MODE_SELECT_DP_SST	(2<<24)
 #define  PIPE_DDI_MODE_SELECT_DP_MST	(3<<24)
-#define  PIPE_DDI_MODE_SELECT_FDI		(4<<24)
-#define  PIPE_DDI_BPC_8					(0<<20)
-#define  PIPE_DDI_BPC_10				(1<<20)
-#define  PIPE_DDI_BPC_6					(2<<20)
-#define  PIPE_DDI_BPC_12				(3<<20)
-#define  PIPE_DDI_BFI_ENABLE			(1<<4)
-#define  PIPE_DDI_PORT_WIDTH_X1			(0<<1)
-#define  PIPE_DDI_PORT_WIDTH_X2			(1<<1)
-#define  PIPE_DDI_PORT_WIDTH_X4			(3<<1)
+#define  PIPE_DDI_MODE_SELECT_FDI	(4<<24)
+#define  PIPE_DDI_BPC_MASK		(7<<20)
+#define  PIPE_DDI_BPC_8			(0<<20)
+#define  PIPE_DDI_BPC_10		(1<<20)
+#define  PIPE_DDI_BPC_6			(2<<20)
+#define  PIPE_DDI_BPC_12		(3<<20)
+#define  PIPE_DDI_PVSYNC		(1<<17)
+#define  PIPE_DDI_PHSYNC		(1<<16)
+#define  PIPE_DDI_BFI_ENABLE		(1<<4)
+#define  PIPE_DDI_PORT_WIDTH_X1		(0<<1)
+#define  PIPE_DDI_PORT_WIDTH_X2		(1<<1)
+#define  PIPE_DDI_PORT_WIDTH_X4		(3<<1)
 
 /* DisplayPort Transport Control */
 #define DP_TP_CTL_A			0x64040
 #define DP_TP_CTL_B			0x64140
-#define DP_TP_CTL(port) _PORT(port, \
-					DP_TP_CTL_A, \
-					DP_TP_CTL_B)
-#define  DP_TP_CTL_ENABLE		(1<<31)
-#define  DP_TP_CTL_MODE_SST	(0<<27)
-#define  DP_TP_CTL_MODE_MST	(1<<27)
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define  DP_TP_CTL_ENABLE			(1<<31)
+#define  DP_TP_CTL_MODE_SST			(0<<27)
+#define  DP_TP_CTL_MODE_MST			(1<<27)
 #define  DP_TP_CTL_ENHANCED_FRAME_ENABLE	(1<<18)
-#define  DP_TP_CTL_FDI_AUTOTRAIN	(1<<15)
+#define  DP_TP_CTL_FDI_AUTOTRAIN		(1<<15)
 #define  DP_TP_CTL_LINK_TRAIN_MASK		(7<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT1		(0<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT2		(1<<8)
-#define  DP_TP_CTL_LINK_TRAIN_NORMAL	(3<<8)
+#define  DP_TP_CTL_LINK_TRAIN_NORMAL		(3<<8)
 
 /* DisplayPort Transport Status */
 #define DP_TP_STATUS_A			0x64044
 #define DP_TP_STATUS_B			0x64144
-#define DP_TP_STATUS(port) _PORT(port, \
-					DP_TP_STATUS_A, \
-					DP_TP_STATUS_B)
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
 #define  DP_TP_STATUS_AUTOTRAIN_DONE	(1<<12)
 
 /* DDI Buffer Control */
 #define DDI_BUF_CTL_A				0x64000
 #define DDI_BUF_CTL_B				0x64100
-#define DDI_BUF_CTL(port) _PORT(port, \
-					DDI_BUF_CTL_A, \
-					DDI_BUF_CTL_B)
-#define  DDI_BUF_CTL_ENABLE				(1<<31)
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define  DDI_BUF_CTL_ENABLE			(1<<31)
 #define  DDI_BUF_EMP_400MV_0DB_HSW		(0<<24)   /* Sel0 */
-#define  DDI_BUF_EMP_400MV_3_5DB_HSW	(1<<24)   /* Sel1 */
+#define  DDI_BUF_EMP_400MV_3_5DB_HSW		(1<<24)   /* Sel1 */
 #define  DDI_BUF_EMP_400MV_6DB_HSW		(2<<24)   /* Sel2 */
-#define  DDI_BUF_EMP_400MV_9_5DB_HSW	(3<<24)   /* Sel3 */
+#define  DDI_BUF_EMP_400MV_9_5DB_HSW		(3<<24)   /* Sel3 */
 #define  DDI_BUF_EMP_600MV_0DB_HSW		(4<<24)   /* Sel4 */
-#define  DDI_BUF_EMP_600MV_3_5DB_HSW	(5<<24)   /* Sel5 */
+#define  DDI_BUF_EMP_600MV_3_5DB_HSW		(5<<24)   /* Sel5 */
 #define  DDI_BUF_EMP_600MV_6DB_HSW		(6<<24)   /* Sel6 */
 #define  DDI_BUF_EMP_800MV_0DB_HSW		(7<<24)   /* Sel7 */
-#define  DDI_BUF_EMP_800MV_3_5DB_HSW	(8<<24)   /* Sel8 */
-#define  DDI_BUF_EMP_MASK				(0xf<<24)
-#define  DDI_BUF_IS_IDLE				(1<<7)
-#define  DDI_PORT_WIDTH_X1				(0<<1)
-#define  DDI_PORT_WIDTH_X2				(1<<1)
-#define  DDI_PORT_WIDTH_X4				(3<<1)
+#define  DDI_BUF_EMP_800MV_3_5DB_HSW		(8<<24)   /* Sel8 */
+#define  DDI_BUF_EMP_MASK			(0xf<<24)
+#define  DDI_BUF_IS_IDLE			(1<<7)
+#define  DDI_PORT_WIDTH_X1			(0<<1)
+#define  DDI_PORT_WIDTH_X2			(1<<1)
+#define  DDI_PORT_WIDTH_X4			(3<<1)
 #define  DDI_INIT_DISPLAY_DETECTED		(1<<0)
 
 /* DDI Buffer Translations */
 #define DDI_BUF_TRANS_A				0x64E00
 #define DDI_BUF_TRANS_B				0x64E60
-#define DDI_BUF_TRANS(port) _PORT(port, \
-					DDI_BUF_TRANS_A, \
-					DDI_BUF_TRANS_B)
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
 
 /* Sideband Interface (SBI) is programmed indirectly, via
  * SBI_ADDR, which contains the register offset; and SBI_DATA,
  * which contains the payload */
-#define SBI_ADDR				0xC6000
-#define SBI_DATA				0xC6004
+#define SBI_ADDR			0xC6000
+#define SBI_DATA			0xC6004
 #define SBI_CTL_STAT			0xC6008
 #define  SBI_CTL_OP_CRRD		(0x6<<8)
 #define  SBI_CTL_OP_CRWR		(0x7<<8)
 #define  SBI_RESPONSE_FAIL		(0x1<<1)
-#define  SBI_RESPONSE_SUCCESS	(0x0<<1)
-#define  SBI_BUSY				(0x1<<0)
-#define  SBI_READY				(0x0<<0)
+#define  SBI_RESPONSE_SUCCESS		(0x0<<1)
+#define  SBI_BUSY			(0x1<<0)
+#define  SBI_READY			(0x0<<0)
 
 /* SBI offsets */
-#define  SBI_SSCDIVINTPHASE6		0x0600
+#define  SBI_SSCDIVINTPHASE6			0x0600
 #define   SBI_SSCDIVINTPHASE_DIVSEL_MASK	((0x7f)<<1)
 #define   SBI_SSCDIVINTPHASE_DIVSEL(x)		((x)<<1)
 #define   SBI_SSCDIVINTPHASE_INCVAL_MASK	((0x7f)<<8)
 #define   SBI_SSCDIVINTPHASE_INCVAL(x)		((x)<<8)
-#define   SBI_SSCDIVINTPHASE_DIR(x)			((x)<<15)
+#define   SBI_SSCDIVINTPHASE_DIR(x)		((x)<<15)
 #define   SBI_SSCDIVINTPHASE_PROPAGATE		(1<<0)
-#define  SBI_SSCCTL					0x020c
+#define  SBI_SSCCTL				0x020c
 #define  SBI_SSCCTL6				0x060C
-#define   SBI_SSCCTL_DISABLE		(1<<0)
+#define   SBI_SSCCTL_DISABLE			(1<<0)
 #define  SBI_SSCAUXDIV6				0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)		((x)<<4)
-#define  SBI_DBUFF0					0x2a00
+#define  SBI_DBUFF0				0x2a00
 
 /* LPT PIXCLK_GATE */
-#define PIXCLK_GATE				0xC6020
-#define  PIXCLK_GATE_UNGATE		1<<0
-#define  PIXCLK_GATE_GATE		0<<0
+#define PIXCLK_GATE			0xC6020
+#define  PIXCLK_GATE_UNGATE		(1<<0)
+#define  PIXCLK_GATE_GATE		(0<<0)
 
 /* SPLL */
-#define SPLL_CTL				0x46020
+#define SPLL_CTL			0x46020
 #define  SPLL_PLL_ENABLE		(1<<31)
 #define  SPLL_PLL_SCC			(1<<28)
 #define  SPLL_PLL_NON_SCC		(2<<28)
-#define  SPLL_PLL_FREQ_810MHz	(0<<26)
-#define  SPLL_PLL_FREQ_1350MHz	(1<<26)
+#define  SPLL_PLL_FREQ_810MHz		(0<<26)
+#define  SPLL_PLL_FREQ_1350MHz		(1<<26)
 
 /* WRPLL */
-#define WRPLL_CTL1				0x46040
-#define WRPLL_CTL2				0x46060
-#define  WRPLL_PLL_ENABLE				(1<<31)
-#define  WRPLL_PLL_SELECT_SSC			(0x01<<28)
-#define  WRPLL_PLL_SELECT_NON_SCC		(0x02<<28)
+#define WRPLL_CTL1			0x46040
+#define WRPLL_CTL2			0x46060
+#define  WRPLL_PLL_ENABLE		(1<<31)
+#define  WRPLL_PLL_SELECT_SSC		(0x01<<28)
+#define  WRPLL_PLL_SELECT_NON_SCC	(0x02<<28)
 #define  WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
 /* WRPLL divider programming */
-#define  WRPLL_DIVIDER_REFERENCE(x)		((x)<<0)
-#define  WRPLL_DIVIDER_POST(x)			((x)<<8)
-#define  WRPLL_DIVIDER_FEEDBACK(x)		((x)<<16)
+#define  WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
+#define  WRPLL_DIVIDER_POST(x)		((x)<<8)
+#define  WRPLL_DIVIDER_FEEDBACK(x)	((x)<<16)
 
 /* Port clock selection */
 #define PORT_CLK_SEL_A			0x46100
 #define PORT_CLK_SEL_B			0x46104
-#define PORT_CLK_SEL(port) _PORT(port, \
-					PORT_CLK_SEL_A, \
-					PORT_CLK_SEL_B)
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
 #define  PORT_CLK_SEL_LCPLL_2700	(0<<29)
 #define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
 #define  PORT_CLK_SEL_LCPLL_810		(2<<29)
-#define  PORT_CLK_SEL_SPLL			(3<<29)
+#define  PORT_CLK_SEL_SPLL		(3<<29)
 #define  PORT_CLK_SEL_WRPLL1		(4<<29)
 #define  PORT_CLK_SEL_WRPLL2		(5<<29)
 
 /* Pipe clock selection */
 #define PIPE_CLK_SEL_A			0x46140
 #define PIPE_CLK_SEL_B			0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
-					PIPE_CLK_SEL_A, \
-					PIPE_CLK_SEL_B)
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
 /* For each pipe, we need to select the corresponding port clock */
-#define  PIPE_CLK_SEL_DISABLED	(0x0<<29)
-#define  PIPE_CLK_SEL_PORT(x)	((x+1)<<29)
+#define  PIPE_CLK_SEL_DISABLED		(0x0<<29)
+#define  PIPE_CLK_SEL_PORT(x)		((x+1)<<29)
 
 /* LCPLL Control */
-#define LCPLL_CTL				0x130040
+#define LCPLL_CTL			0x130040
 #define  LCPLL_PLL_DISABLE		(1<<31)
 #define  LCPLL_PLL_LOCK			(1<<30)
-#define  LCPLL_CD_CLOCK_DISABLE	(1<<25)
+#define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A		0x45270
 #define PIPE_WM_LINETIME_B		0x45274
-#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
-					PIPE_WM_LINETIME_A, \
-					PIPE_WM_LINETIME_B)
-#define   PIPE_WM_LINETIME_MASK		(0x1ff)
-#define   PIPE_WM_LINETIME_TIME(x)			((x))
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+					   PIPE_WM_LINETIME_B)
+#define   PIPE_WM_LINETIME_MASK			(0x1ff)
+#define   PIPE_WM_LINETIME_TIME(x)		((x))
 #define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
-#define   PIPE_WM_LINETIME_IPS_LINETIME(x)		((x)<<16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME(x)	((x)<<16)
 
 /* SFUSE_STRAP */
-#define SFUSE_STRAP				0xc2014
+#define SFUSE_STRAP			0xc2014
 #define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
 #define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
 #define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
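Most of the audio and DDI additions above introduce A/B register pairs wrapped by _PIPE() or _PORT() selectors. A standalone sketch of how that convention resolves to an MMIO offset; the _PIPE() definition below mirrors the one used elsewhere in i915_reg.h and is an assumption here, since it is not part of this hunk:

/* Illustrative only: _PIPE(pipe, a, b) picks register A for index 0 and
 * register B for index 1 by linear offset arithmetic, so B only needs to sit
 * at a fixed stride from A (0x100 for the IBX audio block below). */
#include <stdio.h>

#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))

#define IBX_AUD_CNTL_ST_A	0xE20B4
#define IBX_AUD_CNTL_ST_B	0xE21B4
#define IBX_AUD_CNTL_ST(pipe)	_PIPE(pipe, IBX_AUD_CNTL_ST_A, IBX_AUD_CNTL_ST_B)

int main(void)
{
	printf("pipe A: 0x%05X\n", (unsigned)IBX_AUD_CNTL_ST(0));	/* 0xE20B4 */
	printf("pipe B: 0x%05X\n", (unsigned)IBX_AUD_CNTL_ST(1));	/* 0xE21B4 */
	return 0;
}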
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 7631807a2788..903eebd2117a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
 }
 
 static ssize_t
-show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
 }
 
 static ssize_t
-show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
 }
 
 static ssize_t
-show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
 	return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
 }
 
 static ssize_t
-show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
 	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
 	return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
 }
@@ -93,6 +93,7 @@ static struct attribute_group rc6_attr_group = {
 	.name = power_group_name,
 	.attrs =  rc6_attrs
 };
+#endif
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
@@ -202,37 +203,214 @@ static struct bin_attribute dpf_attrs = {
 	.mmap = NULL
 };
 
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev->struct_mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev->struct_mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap, hw_max, hw_min;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val /= GT_FREQUENCY_MULTIPLIER;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	hw_max = (rp_state_cap & 0xff);
+	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	if (dev_priv->rps.cur_delay > val)
+		gen6_set_rps(dev_priv->dev, val);
+
+	dev_priv->rps.max_delay = val;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev->struct_mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap, hw_max, hw_min;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val /= GT_FREQUENCY_MULTIPLIER;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	hw_max = (rp_state_cap & 0xff);
+	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	if (dev_priv->rps.cur_delay < val)
+		gen6_set_rps(dev_priv->dev, val);
+
+	dev_priv->rps.min_delay = val;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap;
+	ssize_t ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (attr == &dev_attr_gt_RP0_freq_mhz) {
+		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+	} else {
+		BUG();
+	}
+	return snprintf(buf, PAGE_SIZE, "%d", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_gt_RP0_freq_mhz.attr,
+	&dev_attr_gt_RP1_freq_mhz.attr,
+	&dev_attr_gt_RPn_freq_mhz.attr,
+	NULL,
+};
+
 void i915_setup_sysfs(struct drm_device *dev)
 {
 	int ret;
 
+#ifdef CONFIG_PM
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
-
-	if (IS_IVYBRIDGE(dev)) {
+#endif
+	if (HAS_L3_GPU_CACHE(dev)) {
 		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+		if (ret)
+			DRM_ERROR("gen6 sysfs setup failed\n");
+	}
 }
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
+	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
+#ifdef CONFIG_PM
 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
 }
-#else
-void i915_setup_sysfs(struct drm_device *dev)
-{
-	return;
-}
-
-void i915_teardown_sysfs(struct drm_device *dev)
-{
-	return;
-}
-#endif /* CONFIG_PM */
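The new gt_*_freq_mhz attributes hang off the DRM card device and report values already scaled by GT_FREQUENCY_MULTIPLIER; the RP0/RP1/RPn limits are decoded straight out of GEN6_RP_STATE_CAP (bits 7:0, 15:8 and 23:16). A hedged userspace sketch of how they might be consumed; the card0 path is an assumption about where the minor's kdev ends up, and writing the max limit needs root:

/* Userspace sketch only: read the current GT frequency and lower the maximum.
 * Reads return plain decimal MHz (snprintf("%d") in the show functions above);
 * writes go through gt_max_freq_mhz_store(), which divides by
 * GT_FREQUENCY_MULTIPLIER and rejects values outside the RP_STATE_CAP range. */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/drm/card0/gt_cur_freq_mhz", "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("current GT freq: %s MHz\n", buf);
		fclose(f);
	}

	f = fopen("/sys/class/drm/card0/gt_max_freq_mhz", "w");
	if (f) {
		fprintf(f, "900\n");	/* hypothetical 900 MHz cap */
		fclose(f);
	}
	return 0;
}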
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a84a6d..8134421b89a6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
-	    TP_PROTO(struct drm_device *dev, bool purgeable),
-	    TP_ARGS(dev, purgeable),
+	    TP_PROTO(struct drm_device *dev),
+	    TP_ARGS(dev),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(bool, purgeable)
 			    ),
 
 	    TP_fast_assign(
 			   __entry->dev = dev->primary->index;
-			   __entry->purgeable = purgeable;
 			  ),
 
-	    TP_printk("dev=%d%s",
-		      __entry->dev,
-		      __entry->purgeable ? ", purgeable only" : "")
+	    TP_printk("dev=%d", __entry->dev)
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
@@ -434,6 +430,21 @@ TRACE_EVENT(i915_reg_rw,
 		(u32)(__entry->val >> 32))
 );
 
+TRACE_EVENT(intel_gpu_freq_change,
+	    TP_PROTO(u32 freq),
+	    TP_ARGS(freq),
+
+	    TP_STRUCT__entry(
+			     __field(u32, freq)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->freq = freq;
+			   ),
+
+	    TP_printk("new_freq=%u", __entry->freq)
+);
+
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
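The new intel_gpu_freq_change event can be watched like any other tracepoint once the driver starts emitting it. A sketch of doing so from userspace; both paths assume a conventional tracefs mount and that the event is registered under the i915 trace system:

/* Sketch only: enable the tracepoint and stream trace_pipe.  Emitted lines
 * end with "new_freq=<value>" as per the TP_printk() format above. */
#include <stdio.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/events/i915/intel_gpu_freq_change/enable";
	char line[256];
	FILE *f = fopen(enable, "w");

	if (!f) {
		perror("enable tracepoint");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);

	f = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), f))	/* blocks until events arrive */
		fputs(line, stdout);
	fclose(f);
	return 0;
}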
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c8f1c0db446d..893f30164b7e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -46,6 +46,7 @@
 struct intel_crt {
 	struct intel_encoder base;
 	bool force_hotplug_required;
+	u32 adpa_reg;
 };
 
 static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -54,42 +55,68 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
 			    struct intel_crt, base);
 }
 
-static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
+	return container_of(encoder, struct intel_crt, base);
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 tmp;
+
+	tmp = I915_READ(crt->adpa_reg);
+
+	if (!(tmp & ADPA_DAC_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
 	u32 temp;
 
-	temp = I915_READ(PCH_ADPA);
+	temp = I915_READ(crt->adpa_reg);
+	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
 	temp &= ~ADPA_DAC_ENABLE;
+	I915_WRITE(crt->adpa_reg, temp);
+}
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		temp |= ADPA_DAC_ENABLE;
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		/* Just leave port enable cleared */
-		break;
-	}
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 temp;
 
-	I915_WRITE(PCH_ADPA, temp);
+	temp = I915_READ(crt->adpa_reg);
+	temp |= ADPA_DAC_ENABLE;
+	I915_WRITE(crt->adpa_reg, temp);
 }
 
-static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
 	u32 temp;
 
-	temp = I915_READ(ADPA);
+	temp = I915_READ(crt->adpa_reg);
 	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
 	temp &= ~ADPA_DAC_ENABLE;
 
-	if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		temp |= ADPA_DAC_ENABLE;
@@ -105,7 +132,51 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
 		break;
 	}
 
-	I915_WRITE(ADPA, temp);
+	I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	struct drm_crtc *crtc;
+	int old_dpms;
+
+	/* PCH platforms and VLV only support on/off. */
+	if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	old_dpms = connector->dpms;
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = encoder->base.crtc;
+	if (!crtc) {
+		encoder->connectors_active = false;
+		return;
+	}
+
+	/* We need the pipe to run for anything but OFF. */
+	if (mode == DRM_MODE_DPMS_OFF)
+		encoder->connectors_active = false;
+	else
+		encoder->connectors_active = true;
+
+	if (mode < old_dpms) {
+		/* From off to on, enable the pipe first. */
+		intel_crtc_update_dpms(crtc);
+
+		intel_crt_set_dpms(encoder, mode);
+	} else {
+		intel_crt_set_dpms(encoder, mode);
+
+		intel_crtc_update_dpms(crtc);
+	}
+
+	intel_modeset_check_state(connector->dev);
 }
 
 static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -144,19 +215,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crt *crt =
+		intel_encoder_to_crt(to_intel_encoder(encoder));
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int dpll_md_reg;
 	u32 adpa, dpll_md;
-	u32 adpa_reg;
 
 	dpll_md_reg = DPLL_MD(intel_crtc->pipe);
 
-	if (HAS_PCH_SPLIT(dev))
-		adpa_reg = PCH_ADPA;
-	else
-		adpa_reg = ADPA;
-
 	/*
 	 * Disable separate mode multiplier used when cloning SDVO to CRT
 	 * XXX this needs to be adjusted when we really are cloning
@@ -184,7 +251,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	if (!HAS_PCH_SPLIT(dev))
 		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
 
-	I915_WRITE(adpa_reg, adpa);
+	I915_WRITE(crt->adpa_reg, adpa);
 }
 
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -544,14 +611,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 		return connector->status;
 
 	/* for pre-945g platforms use load detect */
-	if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
-				       &tmp)) {
+	if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
 		if (intel_crt_detect_ddc(connector))
 			status = connector_status_connected;
 		else
 			status = intel_crt_load_detect(crt);
-		intel_release_load_detect_pipe(&crt->base, connector,
-					       &tmp);
+		intel_release_load_detect_pipe(connector, &tmp);
 	} else
 		status = connector_status_unknown;
 
@@ -602,25 +667,15 @@ static void intel_crt_reset(struct drm_connector *connector)
  * Routines for controlling stuff on the analog port
  */
 
-static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
 	.mode_fixup = intel_crt_mode_fixup,
-	.prepare = intel_encoder_prepare,
-	.commit = intel_encoder_commit,
 	.mode_set = intel_crt_mode_set,
-	.dpms = pch_crt_dpms,
-};
-
-static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
-	.mode_fixup = intel_crt_mode_fixup,
-	.prepare = intel_encoder_prepare,
-	.commit = intel_encoder_commit,
-	.mode_set = intel_crt_mode_set,
-	.dpms = gmch_crt_dpms,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.reset = intel_crt_reset,
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_crt_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = intel_crt_destroy,
@@ -661,7 +716,6 @@ void intel_crt_init(struct drm_device *dev)
 	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	const struct drm_encoder_helper_funcs *encoder_helper_funcs;
 
 	/* Skip machines without VGA that falsely report hotplug events */
 	if (dmi_check_system(intel_no_crt))
@@ -687,13 +741,11 @@ void intel_crt_init(struct drm_device *dev)
 	intel_connector_attach_encoder(intel_connector, &crt->base);
 
 	crt->base.type = INTEL_OUTPUT_ANALOG;
-	crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
-				1 << INTEL_ANALOG_CLONE_BIT |
-				1 << INTEL_SDVO_LVDS_CLONE_BIT);
+	crt->base.cloneable = true;
 	if (IS_HASWELL(dev))
 		crt->base.crtc_mask = (1 << 0);
 	else
-		crt->base.crtc_mask = (1 << 0) | (1 << 1);
+		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
 	if (IS_GEN2(dev))
 		connector->interlace_allowed = 0;
@@ -702,11 +754,18 @@ void intel_crt_init(struct drm_device *dev)
 	connector->doublescan_allowed = 0;
 
 	if (HAS_PCH_SPLIT(dev))
-		encoder_helper_funcs = &pch_encoder_funcs;
+		crt->adpa_reg = PCH_ADPA;
+	else if (IS_VALLEYVIEW(dev))
+		crt->adpa_reg = VLV_ADPA;
 	else
-		encoder_helper_funcs = &gmch_encoder_funcs;
+		crt->adpa_reg = ADPA;
+
+	crt->base.disable = intel_disable_crt;
+	crt->base.enable = intel_enable_crt;
+	crt->base.get_hw_state = intel_crt_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
-	drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
+	drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
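intel_crt_dpms() above leans on the numeric ordering of the DPMS constants to decide whether the pipe comes up before or after ADPA is touched. A standalone sketch of that rule; the constants mirror drm_mode.h, where ON is 0 and the progressively "more off" states have larger values:

/* Sketch of the ordering decision in intel_crt_dpms(): "mode < old_dpms"
 * means the connector is being powered up, so the pipe must be running
 * before the DAC is enabled; powering down goes the other way round. */
#include <stdio.h>

enum { DPMS_ON = 0, DPMS_STANDBY = 1, DPMS_SUSPEND = 2, DPMS_OFF = 3 };

static void transition(int old_dpms, int mode)
{
	if (mode < old_dpms)
		printf("%d -> %d: update pipe first, then set ADPA\n",
		       old_dpms, mode);
	else
		printf("%d -> %d: set ADPA first, then update pipe\n",
		       old_dpms, mode);
}

int main(void)
{
	transition(DPMS_OFF, DPMS_ON);	/* power up */
	transition(DPMS_ON, DPMS_OFF);	/* power down */
	return 0;
}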
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c74859172..bfe375466a0e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	case PORT_B:
 	case PORT_C:
 	case PORT_D:
-		intel_hdmi_init(dev, DDI_BUF_CTL(port));
+		intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
 		break;
 	default:
 		DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@ struct wrpll_tmds_clock {
 	u16 r2;		/* Reference divider */
 };
 
-/* Table of matching values for WRPLL clocks programming for each frequency */
+/* Table of matching values for WRPLL clocks programming for each frequency.
+ * The code assumes this table is sorted. */
 static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
 	{19750,	38,	25,	18},
 	{20000,	48,	32,	18},
@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
 	{23000,	36,	23,	15},
 	{23500,	40,	40,	23},
 	{23750,	26,	16,	14},
-	{23750,	26,	16,	14},
 	{24000,	36,	24,	15},
 	{25000,	36,	25,	15},
 	{25175,	26,	40,	33},
@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
 	{108000,	8,	24,	15},
 	{108108,	8,	173,	108},
 	{109000,	6,	23,	19},
-	{109000,	6,	23,	19},
 	{110000,	6,	22,	18},
 	{110013,	6,	22,	18},
 	{110250,	8,	49,	30},
@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
 	{218250,	4,	42,	26},
 	{218750,	4,	34,	21},
 	{219000,	4,	47,	29},
-	{219000,	4,	47,	29},
 	{220000,	4,	44,	27},
 	{220640,	4,	49,	30},
 	{220750,	4,	36,	22},
@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	int port = intel_hdmi->ddi_port;
 	int pipe = intel_crtc->pipe;
-	int p, n2, r2, valid=0;
+	int p, n2, r2;
 	u32 temp, i;
 
 	/* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
 	 */
 	DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
 
-	for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
-		if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
-			p = wrpll_tmds_clock_table[i].p;
-			n2 = wrpll_tmds_clock_table[i].n2;
-			r2 = wrpll_tmds_clock_table[i].r2;
+	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+		if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+			break;
 
-			DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
-					crtc->mode.clock,
-					p, n2, r2);
+	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+		i--;
 
-			valid = 1;
-			break;
-		}
-	}
+	p = wrpll_tmds_clock_table[i].p;
+	n2 = wrpll_tmds_clock_table[i].n2;
+	r2 = wrpll_tmds_clock_table[i].r2;
 
-	if (!valid) {
-		DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
-				crtc->mode.clock);
-		return;
-	}
+	if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
+		DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
+			 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+
+	DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+		      crtc->mode.clock, p, n2, r2);
 
 	/* Enable LCPLL if disabled */
 	temp = I915_READ(LCPLL_CTL);
@@ -718,46 +713,107 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
 		/* Proper support for digital audio needs a new logic and a new set
 		 * of registers, so we leave it for future patch bombing.
 		 */
-		DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+		DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
 				 pipe_name(intel_crtc->pipe));
+
+		/* write eld */
+		DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+		intel_write_eld(encoder, adjusted_mode);
 	}
 
 	/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
-	temp = I915_READ(DDI_FUNC_CTL(pipe));
-	temp &= ~PIPE_DDI_PORT_MASK;
-	temp &= ~PIPE_DDI_BPC_12;
-	temp |= PIPE_DDI_SELECT_PORT(port) |
-			PIPE_DDI_MODE_SELECT_HDMI |
-			((intel_crtc->bpp > 24) ?
-				PIPE_DDI_BPC_12 :
-				PIPE_DDI_BPC_8) |
-			PIPE_DDI_FUNC_ENABLE;
+	temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+
+	switch (intel_crtc->bpp) {
+	case 18:
+		temp |= PIPE_DDI_BPC_6;
+		break;
+	case 24:
+		temp |= PIPE_DDI_BPC_8;
+		break;
+	case 30:
+		temp |= PIPE_DDI_BPC_10;
+		break;
+	case 36:
+		temp |= PIPE_DDI_BPC_12;
+		break;
+	default:
+		WARN(1, "%d bpp unsupported by pipe DDI function\n",
+		     intel_crtc->bpp);
+	}
+
+	if (intel_hdmi->has_hdmi_sink)
+		temp |= PIPE_DDI_MODE_SELECT_HDMI;
+	else
+		temp |= PIPE_DDI_MODE_SELECT_DVI;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		temp |= PIPE_DDI_PVSYNC;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		temp |= PIPE_DDI_PHSYNC;
 
 	I915_WRITE(DDI_FUNC_CTL(pipe), temp);
 
 	intel_hdmi->set_infoframes(encoder, adjusted_mode);
 }
 
-void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+			    enum pipe *pipe)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 tmp;
+	int i;
+
+	tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+
+	if (!(tmp & DDI_BUF_CTL_ENABLE))
+		return false;
+
+	for_each_pipe(i) {
+		tmp = I915_READ(DDI_FUNC_CTL(i));
+
+		if ((tmp & PIPE_DDI_PORT_MASK)
+		    == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
+			*pipe = i;
+			return true;
+		}
+	}
+
+	DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+
+	return true;
+}
+
+void intel_enable_ddi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	int port = intel_hdmi->ddi_port;
 	u32 temp;
 
 	temp = I915_READ(DDI_BUF_CTL(port));
-
-	if (mode != DRM_MODE_DPMS_ON) {
-		temp &= ~DDI_BUF_CTL_ENABLE;
-	} else {
-		temp |= DDI_BUF_CTL_ENABLE;
-	}
+	temp |= DDI_BUF_CTL_ENABLE;
 
 	/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
 	 * and swing/emphasis values are ignored so nothing special needs
 	 * to be done besides enabling the port.
 	 */
-	I915_WRITE(DDI_BUF_CTL(port),
-			temp);
+	I915_WRITE(DDI_BUF_CTL(port), temp);
+}
+
+void intel_disable_ddi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	int port = intel_hdmi->ddi_port;
+	u32 temp;
+
+	temp = I915_READ(DDI_BUF_CTL(port));
+	temp &= ~DDI_BUF_CTL_ENABLE;
+
+	I915_WRITE(DDI_BUF_CTL(port), temp);
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ea9a3ceb269..e3c02655d36f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1006,7 +1006,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 		/* Wait for the Pipe State to go off */
 		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
 			     100))
-			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+			WARN(1, "pipe_off wait timed out\n");
 	} else {
 		u32 last_line, line_mask;
 		int reg = PIPEDSL(pipe);
@@ -1024,7 +1024,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 		} while (((I915_READ(reg) & line_mask) != last_line) &&
 			 time_after(timeout, jiffies));
 		if (time_after(jiffies, timeout))
-			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+			WARN(1, "pipe_off wait timed out\n");
 	}
 }
 
@@ -1431,6 +1431,8 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
  * protect mechanism may be enabled.
  *
  * Note!  This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
  */
 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
 	intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
-static void disable_pch_dp(struct drm_i915_private *dev_priv,
-			   enum pipe pipe, int reg, u32 port_sel)
-{
-	u32 val = I915_READ(reg);
-	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
-		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
-		I915_WRITE(reg, val & ~DP_PORT_EN);
-	}
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
-			     enum pipe pipe, int reg)
-{
-	u32 val = I915_READ(reg);
-	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
-		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
-			      reg, pipe);
-		I915_WRITE(reg, val & ~PORT_ENABLE);
-	}
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
-				    enum pipe pipe)
-{
-	u32 reg, val;
-
-	val = I915_READ(PCH_PP_CONTROL);
-	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
-	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
-	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
-	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
-	reg = PCH_ADPA;
-	val = I915_READ(reg);
-	if (adpa_pipe_enabled(dev_priv, pipe, val))
-		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
-	reg = PCH_LVDS;
-	val = I915_READ(reg);
-	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
-		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
-		I915_WRITE(reg, val & ~LVDS_PORT_EN);
-		POSTING_READ(reg);
-		udelay(100);
-	}
-
-	disable_pch_hdmi(dev_priv, pipe, HDMIB);
-	disable_pch_hdmi(dev_priv, pipe, HDMIC);
-	disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
@@ -2201,16 +2150,17 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
 
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
-		    struct drm_framebuffer *old_fb)
+		    struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_framebuffer *old_fb;
 	int ret;
 
 	/* no fb bound */
-	if (!crtc->fb) {
+	if (!fb) {
 		DRM_ERROR("No FB bound\n");
 		return 0;
 	}
@@ -2224,7 +2174,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev,
-					 to_intel_framebuffer(crtc->fb)->obj,
+					 to_intel_framebuffer(fb)->obj,
 					 NULL);
 	if (ret != 0) {
 		mutex_unlock(&dev->struct_mutex);
@@ -2232,17 +2182,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return ret;
 	}
 
-	if (old_fb)
-		intel_finish_fb(old_fb);
+	if (crtc->fb)
+		intel_finish_fb(crtc->fb);
 
-	ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
+	ret = dev_priv->display.update_plane(crtc, fb, x, y);
 	if (ret) {
-		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
 		DRM_ERROR("failed to update base address\n");
 		return ret;
 	}
 
+	old_fb = crtc->fb;
+	crtc->fb = fb;
+	crtc->x = x;
+	crtc->y = y;
+
 	if (old_fb) {
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
 		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
@@ -2709,11 +2664,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	DRM_DEBUG_KMS("FDI train done.\n");
 }
 
-static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	u32 reg, temp;
 
@@ -2754,6 +2708,35 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
 	}
 }
 
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* Switch from PCDclk to Rawclk */
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+	/* Disable CPU FDI TX PLL */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(100);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+	/* Wait for the clocks to turn off. */
+	POSTING_READ(reg);
+	udelay(100);
+}
+
 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2838,13 +2821,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *encoder;
+	struct intel_encoder *intel_encoder;
 
 	/*
 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
 	 * must be driven by its own crtc; no sharing is possible.
 	 */
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 
 		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
 		 * CPU handles all others */
@@ -2852,19 +2835,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 			/* It is still unclear how this will work on PPT, so throw up a warning */
 			WARN_ON(!HAS_PCH_LPT(dev));
 
-			if (encoder->type == DRM_MODE_ENCODER_DAC) {
+			if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
 				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
 				return true;
 			} else {
 				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
-						encoder->type);
+					      intel_encoder->type);
 				return false;
 			}
 		}
 
-		switch (encoder->type) {
+		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_EDP:
-			if (!intel_encoder_is_pch_edp(&encoder->base))
+			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
 				return false;
 			continue;
 		}
@@ -3181,11 +3164,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 temp;
 	bool is_pch_port;
 
+	WARN_ON(!crtc->enabled);
+
 	if (intel_crtc->active)
 		return;
 
@@ -3200,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	is_pch_port = intel_crtc_driving_pch(crtc);
 
-	if (is_pch_port)
-		ironlake_fdi_pll_enable(crtc);
-	else
-		ironlake_fdi_disable(crtc);
+	if (is_pch_port) {
+		ironlake_fdi_pll_enable(intel_crtc);
+	} else {
+		assert_fdi_tx_disabled(dev_priv, pipe);
+		assert_fdi_rx_disabled(dev_priv, pipe);
+	}
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
@@ -3234,6 +3226,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	mutex_unlock(&dev->struct_mutex);
 
 	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+
+	if (HAS_PCH_CPT(dev))
+		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
 }
 
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3241,13 +3239,18 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
 
+
 	if (!intel_crtc->active)
 		return;
 
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
 	intel_crtc_wait_for_pending_flips(crtc);
 	drm_vblank_off(dev, pipe);
 	intel_crtc_update_cursor(crtc, false);
@@ -3263,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	I915_WRITE(PF_CTL(pipe), 0);
 	I915_WRITE(PF_WIN_SZ(pipe), 0);
 
-	ironlake_fdi_disable(crtc);
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
 
-	/* This is a horrible layering violation; we should be doing this in
-	 * the connector/encoder ->prepare instead, but we don't always have
-	 * enough information there about the config to know whether it will
-	 * actually be necessary or just cause undesired flicker.
-	 */
-	intel_disable_pch_ports(dev_priv, pipe);
+	ironlake_fdi_disable(crtc);
 
 	intel_disable_transcoder(dev_priv, pipe);
 
@@ -3304,26 +3304,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	/* disable PCH DPLL */
 	intel_disable_pch_pll(intel_crtc);
 
-	/* Switch from PCDclk to Rawclk */
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_PCDCLK);
-
-	/* Disable CPU FDI TX PLL */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
-
-	POSTING_READ(reg);
-	udelay(100);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
-
-	/* Wait for the clocks to turn off. */
-	POSTING_READ(reg);
-	udelay(100);
+	ironlake_fdi_pll_disable(intel_crtc);
 
 	intel_crtc->active = false;
 	intel_update_watermarks(dev);
@@ -3333,30 +3314,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-
-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
-		ironlake_crtc_enable(crtc);
-		break;
-
-	case DRM_MODE_DPMS_OFF:
-		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
-		ironlake_crtc_disable(crtc);
-		break;
-	}
-}
-
 static void ironlake_crtc_off(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3386,9 +3343,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 
+	WARN_ON(!crtc->enabled);
+
 	if (intel_crtc->active)
 		return;
 
@@ -3405,6 +3365,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	/* Give the overlay scaler a chance to enable if it's on this pipe */
 	intel_crtc_dpms_overlay(intel_crtc, true);
 	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
 }
 
 static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3412,12 +3375,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 
+
 	if (!intel_crtc->active)
 		return;
 
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
 	/* Give the overlay scaler a chance to disable if it's on this pipe */
 	intel_crtc_wait_for_pending_flips(crtc);
 	drm_vblank_off(dev, pipe);
@@ -3436,45 +3404,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	intel_update_watermarks(dev);
 }
 
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		i9xx_crtc_enable(crtc);
-		break;
-	case DRM_MODE_DPMS_OFF:
-		i9xx_crtc_disable(crtc);
-		break;
-	}
-}
-
 static void i9xx_crtc_off(struct drm_crtc *crtc)
 {
 }
 
-/**
- * Sets the power management mode of the pipe and plane.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+				    bool enabled)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	bool enabled;
-
-	if (intel_crtc->dpms_mode == mode)
-		return;
-
-	intel_crtc->dpms_mode = mode;
-
-	dev_priv->display.dpms(crtc, mode);
 
 	if (!dev->primary->master)
 		return;
@@ -3483,8 +3423,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 	if (!master_priv->sarea_priv)
 		return;
 
-	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
 	switch (pipe) {
 	case 0:
 		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3500,13 +3438,42 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 	}
 }
 
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	bool enable = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+		enable |= intel_encoder->connectors_active;
+
+	if (enable)
+		dev_priv->display.crtc_enable(crtc);
+	else
+		dev_priv->display.crtc_disable(crtc);
+
+	intel_crtc_update_sarea(crtc, enable);
+}
+
+static void intel_crtc_noop(struct drm_crtc *crtc)
+{
+}
+
 static void intel_crtc_disable(struct drm_crtc *crtc)
 {
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+	/* crtc should still be enabled when we disable it. */
+	WARN_ON(!crtc->enabled);
+
+	dev_priv->display.crtc_disable(crtc);
+	intel_crtc_update_sarea(crtc, false);
 	dev_priv->display.off(crtc);
 
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3516,63 +3483,128 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
+		crtc->fb = NULL;
+	}
+
+	/* Update computed state. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder || !connector->encoder->crtc)
+			continue;
+
+		if (connector->encoder->crtc != crtc)
+			continue;
+
+		connector->dpms = DRM_MODE_DPMS_OFF;
+		to_intel_encoder(connector->encoder)->connectors_active = false;
 	}
 }
 
-/* Prepare for a mode set.
- *
- * Note we could be a lot smarter here.  We need to figure out which outputs
- * will be enabled, which disabled (in short, how the config will changes)
- * and perform the minimum necessary steps to accomplish that, e.g. updating
- * watermarks, FBC configuration, making sure PLLs are programmed correctly,
- * panel fitting is in the proper state, etc.
- */
-static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+void intel_modeset_disable(struct drm_device *dev)
 {
-	i9xx_crtc_disable(crtc);
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->enabled)
+			intel_crtc_disable(crtc);
+	}
 }
 
-static void i9xx_crtc_commit(struct drm_crtc *crtc)
+void intel_encoder_noop(struct drm_encoder *encoder)
 {
-	i9xx_crtc_enable(crtc);
 }
 
-static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+void intel_encoder_destroy(struct drm_encoder *encoder)
 {
-	ironlake_crtc_disable(crtc);
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
-static void ironlake_crtc_commit(struct drm_crtc *crtc)
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
 {
-	ironlake_crtc_enable(crtc);
+	if (mode == DRM_MODE_DPMS_ON) {
+		encoder->connectors_active = true;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	} else {
+		encoder->connectors_active = false;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	}
 }
 
-void intel_encoder_prepare(struct drm_encoder *encoder)
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
 {
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	/* lvds has its own version of prepare see intel_lvds_prepare */
-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+	if (connector->get_hw_state(connector)) {
+		struct intel_encoder *encoder = connector->encoder;
+		struct drm_crtc *crtc;
+		bool encoder_enabled;
+		enum pipe pipe;
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+			      connector->base.base.id,
+			      drm_get_connector_name(&connector->base));
+
+		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+		     "wrong connector dpms state\n");
+		WARN(connector->base.encoder != &encoder->base,
+		     "active connector not linked to encoder\n");
+		WARN(!encoder->connectors_active,
+		     "encoder->connectors_active not set\n");
+
+		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+		WARN(!encoder_enabled, "encoder not enabled\n");
+		if (WARN_ON(!encoder->base.crtc))
+			return;
+
+		crtc = encoder->base.crtc;
+
+		WARN(!crtc->enabled, "crtc not enabled\n");
+		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+		WARN(pipe != to_intel_crtc(crtc)->pipe,
+		     "encoder active on the wrong pipe\n");
+	}
 }
 
-void intel_encoder_commit(struct drm_encoder *encoder)
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
 {
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	struct drm_device *dev = encoder->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
 
-	/* lvds has its own version of commit see intel_lvds_commit */
-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+	/* All the simple cases only support two dpms states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
 
-	if (HAS_PCH_CPT(dev))
-		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	if (encoder->base.crtc)
+		intel_encoder_dpms(encoder, mode);
+	else
+		WARN_ON(encoder->connectors_active != false);
+
+	intel_modeset_check_state(connector->dev);
 }
 
-void intel_encoder_destroy(struct drm_encoder *encoder)
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning, and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	enum pipe pipe = 0;
+	struct intel_encoder *encoder = connector->encoder;
 
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
+	return encoder->get_hw_state(encoder, &pipe);
 }
 
 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -3593,6 +3625,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
 		drm_mode_set_crtcinfo(adjusted_mode, 0);
 
+	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+	 * with a hsync front porch of 0.
+	 */
+	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+		return false;
+
 	return true;
 }
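The WaPruneModeWithIncorrectHsyncOffset check added above rejects any mode whose horizontal front porch is zero, i.e. hsync_start equal to hdisplay, on gen > 4 and G4X parts. A hypothetical helper that just spells out the arithmetic (for reference, a common 1920x1080 mode with hsync_start = 2008 has an 88-pixel front porch and passes):

/* Sketch: a mode is pruned by the check above when its hsync front
 * porch (hsync_start - hdisplay) works out to zero. */
static bool hsync_front_porch_is_zero(const struct drm_display_mode *mode)
{
	return mode->hsync_start - mode->hdisplay == 0;
}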
 
@@ -3728,6 +3767,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
 					 unsigned int *pipe_bpp,
 					 struct drm_display_mode *mode)
 {
@@ -3797,7 +3837,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 	 * also stays within the max display bpc discovered above.
 	 */
 
-	switch (crtc->fb->depth) {
+	switch (fb->depth) {
 	case 8:
 		bpc = 8; /* since we go through a colormap */
 		break;
@@ -4216,7 +4256,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 			      struct drm_display_mode *mode,
 			      struct drm_display_mode *adjusted_mode,
 			      int x, int y,
-			      struct drm_framebuffer *old_fb)
+			      struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4406,7 +4446,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	I915_WRITE(DSPCNTR(plane), dspcntr);
 	POSTING_READ(DSPCNTR(plane));
 
-	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, fb);
 
 	intel_update_watermarks(dev);
 
@@ -4560,24 +4600,130 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
 	return 120000;
 }
 
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
+				  struct drm_display_mode *adjusted_mode,
+				  bool dither)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	uint32_t val;
+
+	val = I915_READ(PIPECONF(pipe));
+
+	val &= ~PIPE_BPC_MASK;
+	switch (intel_crtc->bpp) {
+	case 18:
+		val |= PIPE_6BPC;
+		break;
+	case 24:
+		val |= PIPE_8BPC;
+		break;
+	case 30:
+		val |= PIPE_10BPC;
+		break;
+	case 36:
+		val |= PIPE_12BPC;
+		break;
+	default:
+		val |= PIPE_8BPC;
+		break;
+	}
+
+	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+	if (dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	val &= ~PIPECONF_INTERLACE_MASK;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	I915_WRITE(PIPECONF(pipe), val);
+	POSTING_READ(PIPECONF(pipe));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+				    struct drm_display_mode *adjusted_mode,
+				    intel_clock_t *clock,
+				    bool *has_reduced_clock,
+				    intel_clock_t *reduced_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	int refclk;
+	const intel_limit_t *limit;
+	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			if (intel_encoder->needs_tv_clock)
+				is_tv = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		}
+	}
+
+	refclk = ironlake_get_refclk(crtc);
+
+	/*
+	 * Returns a set of divisors for the desired target clock with the given
+	 * refclk, or FALSE.  The returned values represent the clock equation:
+	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+	 */
+	limit = intel_limit(crtc, refclk);
+	ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+			      clock);
+	if (!ret)
+		return false;
+
+	if (is_lvds && dev_priv->lvds_downclock_avail) {
+		/*
+		 * Ensure we match the reduced clock's P to the target clock.
+		 * If the clocks don't match, we can't switch the display clock
+		 * by using the FP0/FP1. In such a case we will disable the LVDS
+		 * downclock feature.
+		 */
+		*has_reduced_clock = limit->find_pll(limit, crtc,
+						     dev_priv->lvds_downclock,
+						     refclk,
+						     clock,
+						     reduced_clock);
+	}
+
+	if (is_sdvo && is_tv)
+		i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+	return true;
+}
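The comment above quotes the Ironlake DPLL divisor equation. A sketch of the resulting dot-clock computation from a divisor set returned by find_pll (hypothetical helper; the driver's own clock helpers do the equivalent):

/* Sketch: dot clock in kHz from refclk (kHz) and the divisors,
 * per refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */
static int example_ironlake_dot_clock(int refclk, const intel_clock_t *clock)
{
	int m = 5 * (clock->m1 + 2) + (clock->m2 + 2);

	return refclk * m / (clock->n + 2) / clock->p1 / clock->p2;
}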
+
 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode,
 				  int x, int y,
-				  struct drm_framebuffer *old_fb)
+				  struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	int refclk, num_connectors = 0;
+	int num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+	u32 dpll, fp = 0, fp2 = 0;
 	bool ok, has_reduced_clock = false, is_sdvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 	struct intel_encoder *encoder, *edp_encoder = NULL;
-	const intel_limit_t *limit;
 	int ret;
 	struct fdi_m_n m_n = {0};
 	u32 temp;
@@ -4619,16 +4765,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		num_connectors++;
 	}
 
-	refclk = ironlake_get_refclk(crtc);
-
-	/*
-	 * Returns a set of divisors for the desired target clock with the given
-	 * refclk, or FALSE.  The returned values represent the clock equation:
-	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
-	 */
-	limit = intel_limit(crtc, refclk);
-	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
-			     &clock);
+	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+				     &has_reduced_clock, &reduced_clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
@@ -4637,24 +4775,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	/* Ensure that the cursor is valid for the new mode before changing... */
 	intel_crtc_update_cursor(crtc, true);
 
-	if (is_lvds && dev_priv->lvds_downclock_avail) {
-		/*
-		 * Ensure we match the reduced clock's P to the target clock.
-		 * If the clocks don't match, we can't switch the display clock
-		 * by using the FP0/FP1. In such case we will disable the LVDS
-		 * downclock feature.
-		*/
-		has_reduced_clock = limit->find_pll(limit, crtc,
-						    dev_priv->lvds_downclock,
-						    refclk,
-						    &clock,
-						    &reduced_clock);
-	}
-
-	if (is_sdvo && is_tv)
-		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
-
-
 	/* FDI link */
 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 	lane = 0;
@@ -4682,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		target_clock = adjusted_mode->clock;
 
 	/* determine panel color depth */
-	temp = I915_READ(PIPECONF(pipe));
-	temp &= ~PIPE_BPC_MASK;
-	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
-	switch (pipe_bpp) {
-	case 18:
-		temp |= PIPE_6BPC;
-		break;
-	case 24:
-		temp |= PIPE_8BPC;
-		break;
-	case 30:
-		temp |= PIPE_10BPC;
-		break;
-	case 36:
-		temp |= PIPE_12BPC;
-		break;
-	default:
+	dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
+	if (is_lvds && dev_priv->lvds_dither)
+		dither = true;
+
+	if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+	    pipe_bpp != 36) {
 		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
-			pipe_bpp);
-		temp |= PIPE_8BPC;
+		     pipe_bpp);
 		pipe_bpp = 24;
-		break;
 	}
-
 	intel_crtc->bpp = pipe_bpp;
-	I915_WRITE(PIPECONF(pipe), temp);
 
 	if (!lane) {
 		/*
@@ -4791,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
-	/* setup pipeconf */
-	pipeconf = I915_READ(PIPECONF(pipe));
-
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 	drm_mode_debug_printmodeline(mode);
 
@@ -4856,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		I915_WRITE(PCH_LVDS, temp);
 	}
 
-	pipeconf &= ~PIPECONF_DITHER_EN;
-	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
-	if ((is_lvds && dev_priv->lvds_dither) || dither) {
-		pipeconf |= PIPECONF_DITHER_EN;
-		pipeconf |= PIPECONF_DITHER_TYPE_SP;
-	}
 	if (is_dp && !is_cpu_edp) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
 	} else {
@@ -4897,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	pipeconf &= ~PIPECONF_INTERLACE_MASK;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		pipeconf |= PIPECONF_INTERLACED_ILK;
 		/* the chip adds 2 halflines automatically */
 		adjusted_mode->crtc_vtotal -= 1;
 		adjusted_mode->crtc_vblank_end -= 1;
@@ -4907,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 			   adjusted_mode->crtc_hsync_start
 			   - adjusted_mode->crtc_htotal/2);
 	} else {
-		pipeconf |= PIPECONF_PROGRESSIVE;
 		I915_WRITE(VSYNCSHIFT(pipe), 0);
 	}
 
@@ -4945,15 +5035,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	if (is_cpu_edp)
 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 
-	I915_WRITE(PIPECONF(pipe), pipeconf);
-	POSTING_READ(PIPECONF(pipe));
+	ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
 	intel_wait_for_vblank(dev, pipe);
 
-	I915_WRITE(DSPCNTR(plane), dspcntr);
+	/* Set up the display plane register */
+	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
 	POSTING_READ(DSPCNTR(plane));
 
-	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, fb);
 
 	intel_update_watermarks(dev);
 
@@ -4966,7 +5056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode,
 			       int x, int y,
-			       struct drm_framebuffer *old_fb)
+			       struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4977,14 +5067,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	drm_vblank_pre_modeset(dev, pipe);
 
 	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
-					      x, y, old_fb);
+					      x, y, fb);
 	drm_vblank_post_modeset(dev, pipe);
 
-	if (ret)
-		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
-	else
-		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
-
 	return ret;
 }
 
@@ -5057,6 +5142,91 @@ static void g4x_write_eld(struct drm_connector *connector,
 	I915_WRITE(G4X_AUD_CNTL_ST, i);
 }
 
+static void haswell_write_eld(struct drm_connector *connector,
+				     struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	struct drm_device *dev = crtc->dev;
+	uint32_t eldv;
+	uint32_t i;
+	int len;
+	int pipe = to_intel_crtc(crtc)->pipe;
+	int tmp;
+
+	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+	int aud_config = HSW_AUD_CFG(pipe);
+	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+
+	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+	/* Audio output enable */
+	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+	I915_WRITE(aud_cntrl_st2, tmp);
+
+	/* Wait for 1 vertical blank */
+	intel_wait_for_vblank(dev, pipe);
+
+	/* Set ELD valid state */
+	tmp = I915_READ(aud_cntrl_st2);
+	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+	I915_WRITE(aud_cntrl_st2, tmp);
+	tmp = I915_READ(aud_cntrl_st2);
+	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+	/* Enable HDMI mode */
+	tmp = I915_READ(aud_config);
+	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+	/* clear N_programming_enable and N_value_index */
+	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+	I915_WRITE(aud_config, tmp);
+
+	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
+		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+	} else
+		I915_WRITE(aud_config, 0);
+
+	if (intel_eld_uptodate(connector,
+			       aud_cntrl_st2, eldv,
+			       aud_cntl_st, IBX_ELD_ADDRESS,
+			       hdmiw_hdmiedid))
+		return;
+
+	i = I915_READ(aud_cntrl_st2);
+	i &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+
+	if (!eld[0])
+		return;
+
+	i = I915_READ(aud_cntl_st);
+	i &= ~IBX_ELD_ADDRESS;
+	I915_WRITE(aud_cntl_st, i);
+	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
+	DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
+	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	for (i = 0; i < len; i++)
+		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+	i = I915_READ(aud_cntrl_st2);
+	i |= eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+
+}
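Both write_eld paths stream the ELD into the hardware buffer one dword at a time; byte 2 of the ELD is the baseline length in 4-byte units and the buffer holds at most 21 dwords (84 bytes). A hypothetical helper showing just that copy loop, assuming the same I915_WRITE/min_t environment as the surrounding code:

/* Sketch: copy an ELD blob into the per-pipe ELD data register,
 * one dword per write, clamped to the 84-byte hardware buffer. */
static void example_write_eld_dwords(struct drm_i915_private *dev_priv,
				     int reg, const uint8_t *eld)
{
	int len = min_t(uint8_t, eld[2], 21);	/* 21 dwords == 84 bytes */
	int i;

	for (i = 0; i < len; i++)
		I915_WRITE(reg, *((const uint32_t *)eld + i));
}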
+
 static void ironlake_write_eld(struct drm_connector *connector,
 				     struct drm_crtc *crtc)
 {
@@ -5069,28 +5239,24 @@ static void ironlake_write_eld(struct drm_connector *connector,
 	int aud_config;
 	int aud_cntl_st;
 	int aud_cntrl_st2;
+	int pipe = to_intel_crtc(crtc)->pipe;
 
 	if (HAS_PCH_IBX(connector->dev)) {
-		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
-		aud_config = IBX_AUD_CONFIG_A;
-		aud_cntl_st = IBX_AUD_CNTL_ST_A;
+		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+		aud_config = IBX_AUD_CFG(pipe);
+		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
 	} else {
-		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
-		aud_config = CPT_AUD_CONFIG_A;
-		aud_cntl_st = CPT_AUD_CNTL_ST_A;
+		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+		aud_config = CPT_AUD_CFG(pipe);
+		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
 	}
 
-	i = to_intel_crtc(crtc)->pipe;
-	hdmiw_hdmiedid += i * 0x100;
-	aud_cntl_st += i * 0x100;
-	aud_config += i * 0x100;
-
-	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
+	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
 	i = I915_READ(aud_cntl_st);
-	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
+	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
 	if (!i) {
 		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
 		/* operate blindly on all ports */
@@ -5337,8 +5503,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	uint32_t addr;
 	int ret;
 
-	DRM_DEBUG_KMS("\n");
-
 	/* if we want to turn off the cursor ignore width and height */
 	if (!handle) {
 		DRM_DEBUG_KMS("cursor off\n");
@@ -5584,17 +5748,18 @@ mode_fits_in_fbdev(struct drm_device *dev,
 	return fb;
 }
 
-bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
-				struct drm_connector *connector,
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
 				struct drm_display_mode *mode,
 				struct intel_load_detect_pipe *old)
 {
 	struct intel_crtc *intel_crtc;
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
 	struct drm_crtc *possible_crtc;
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
-	struct drm_framebuffer *old_fb;
+	struct drm_framebuffer *fb;
 	int i = -1;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -5615,21 +5780,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 	if (encoder->crtc) {
 		crtc = encoder->crtc;
 
-		intel_crtc = to_intel_crtc(crtc);
-		old->dpms_mode = intel_crtc->dpms_mode;
+		old->dpms_mode = connector->dpms;
 		old->load_detect_temp = false;
 
 		/* Make sure the crtc and connector are running */
-		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
-			struct drm_encoder_helper_funcs *encoder_funcs;
-			struct drm_crtc_helper_funcs *crtc_funcs;
-
-			crtc_funcs = crtc->helper_private;
-			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
-			encoder_funcs = encoder->helper_private;
-			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-		}
+		if (connector->dpms != DRM_MODE_DPMS_ON)
+			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
 
 		return true;
 	}
@@ -5653,19 +5809,17 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 		return false;
 	}
 
-	encoder->crtc = crtc;
-	connector->encoder = encoder;
+	intel_encoder->new_crtc = to_intel_crtc(crtc);
+	to_intel_connector(connector)->new_encoder = intel_encoder;
 
 	intel_crtc = to_intel_crtc(crtc);
-	old->dpms_mode = intel_crtc->dpms_mode;
+	old->dpms_mode = connector->dpms;
 	old->load_detect_temp = true;
 	old->release_fb = NULL;
 
 	if (!mode)
 		mode = &load_detect_mode;
 
-	old_fb = crtc->fb;
-
 	/* We need a framebuffer large enough to accommodate all accesses
 	 * that the plane may generate whilst we perform load detection.
 	 * We can not rely on the fbcon either being present (we get called
@@ -5673,50 +5827,52 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 	 * not even exist) or that it is large enough to satisfy the
 	 * requested mode.
 	 */
-	crtc->fb = mode_fits_in_fbdev(dev, mode);
-	if (crtc->fb == NULL) {
+	fb = mode_fits_in_fbdev(dev, mode);
+	if (fb == NULL) {
 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
-		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
-		old->release_fb = crtc->fb;
+		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+		old->release_fb = fb;
 	} else
 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
-	if (IS_ERR(crtc->fb)) {
+	if (IS_ERR(fb)) {
 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-		crtc->fb = old_fb;
-		return false;
+		goto fail;
 	}
 
-	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+	if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
-		crtc->fb = old_fb;
-		return false;
+		goto fail;
 	}
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	return true;
+fail:
+	connector->encoder = NULL;
+	encoder->crtc = NULL;
+	return false;
 }
 
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
-				    struct drm_connector *connector,
+void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct intel_load_detect_pipe *old)
 {
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_device *dev = encoder->dev;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 		      connector->base.id, drm_get_connector_name(connector),
 		      encoder->base.id, drm_get_encoder_name(encoder));
 
 	if (old->load_detect_temp) {
-		connector->encoder = NULL;
-		drm_helper_disable_unused_functions(dev);
+		struct drm_crtc *crtc = encoder->crtc;
+
+		to_intel_connector(connector)->new_encoder = NULL;
+		intel_encoder->new_crtc = NULL;
+		intel_set_mode(crtc, NULL, 0, 0, NULL);
 
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -5725,10 +5881,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
 	}
 
 	/* Switch crtc and encoder back off if necessary */
-	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
-		encoder_funcs->dpms(encoder, old->dpms_mode);
-		crtc_funcs->dpms(crtc, old->dpms_mode);
-	}
+	if (old->dpms_mode != DRM_MODE_DPMS_ON)
+		connector->funcs->dpms(connector, old->dpms_mode);
 }
 
 /* Returns the clock of the currently programmed mode of the given pipe. */
@@ -5850,46 +6004,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	return mode;
 }
 
-#define GPU_IDLE_TIMEOUT 500 /* ms */
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(unsigned long arg)
-{
-	struct drm_device *dev = (struct drm_device *)arg;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (!list_empty(&dev_priv->mm.active_list)) {
-		/* Still processing requests, so just re-arm the timer. */
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-		return;
-	}
-
-	dev_priv->busy = false;
-	queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-
-static void intel_crtc_idle_timer(unsigned long arg)
-{
-	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
-	struct drm_crtc *crtc = &intel_crtc->base;
-	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
-	struct intel_framebuffer *intel_fb;
-
-	intel_fb = to_intel_framebuffer(crtc->fb);
-	if (intel_fb && intel_fb->obj->active) {
-		/* The framebuffer is still being accessed by the GPU. */
-		mod_timer(&intel_crtc->idle_timer, jiffies +
-			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-		return;
-	}
-
-	intel_crtc->busy = false;
-	queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
 static void intel_increase_pllclock(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -5919,10 +6033,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 	}
-
-	/* Schedule downclock */
-	mod_timer(&intel_crtc->idle_timer, jiffies +
-		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 }
 
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5961,89 +6071,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 
 }
 
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle.  Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(struct work_struct *work)
+void intel_mark_busy(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    idle_work);
-	struct drm_device *dev = dev_priv->dev;
+	i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	if (!i915_powersave)
 		return;
 
-	mutex_lock(&dev->struct_mutex);
-
-	i915_update_gfx_val(dev_priv);
-
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		/* Skip inactive CRTCs */
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		if (!intel_crtc->busy)
-			intel_decrease_pllclock(crtc);
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_increase_pllclock(crtc);
 	}
-
-
-	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = NULL;
-	struct intel_framebuffer *intel_fb;
-	struct intel_crtc *intel_crtc;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	if (!dev_priv->busy) {
-		intel_sanitize_pm(dev);
-		dev_priv->busy = true;
-	} else
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+	struct drm_device *dev = obj->base.dev;
+	struct drm_crtc *crtc;
 
-	if (obj == NULL)
+	if (!i915_powersave)
 		return;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		intel_fb = to_intel_framebuffer(crtc->fb);
-		if (intel_fb->obj == obj) {
-			if (!intel_crtc->busy) {
-				/* Non-busy -> busy, upclock */
-				intel_increase_pllclock(crtc);
-				intel_crtc->busy = true;
-			} else {
-				/* Busy -> busy, put off timer */
-				mod_timer(&intel_crtc->idle_timer, jiffies +
-					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-			}
-		}
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_decrease_pllclock(crtc);
 	}
 }
 
@@ -6394,7 +6461,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	default:
 		WARN_ONCE(1, "unknown plane in flip command\n");
 		ret = -ENODEV;
-		goto err;
+		goto err_unpin;
 	}
 
 	ret = intel_ring_begin(ring, 4);
@@ -6502,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup_pending;
 
 	intel_disable_fbc(dev);
-	intel_mark_busy(dev, obj);
+	intel_mark_fb_busy(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6527,81 +6594,807 @@ free_work:
 	return ret;
 }
 
-static void intel_sanitize_modesetting(struct drm_device *dev,
-				       int pipe, int plane)
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+	.mode_set_base_atomic = intel_pipe_set_base_atomic,
+	.load_lut = intel_crtc_load_lut,
+	.disable = intel_crtc_noop,
+};
+
+bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg, val;
-	int i;
+	struct intel_encoder *other_encoder;
+	struct drm_crtc *crtc = &encoder->new_crtc->base;
 
-	/* Clear any frame start delays used for debugging left by the BIOS */
-	for_each_pipe(i) {
-		reg = PIPECONF(i);
-		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+	if (WARN_ON(!crtc))
+		return false;
+
+	list_for_each_entry(other_encoder,
+			    &crtc->dev->mode_config.encoder_list,
+			    base.head) {
+
+		if (&other_encoder->new_crtc->base != crtc ||
+		    encoder == other_encoder)
+			continue;
+		else
+			return true;
 	}
 
-	if (HAS_PCH_SPLIT(dev))
-		return;
+	return false;
+}
 
-	/* Who knows what state these registers were left in by the BIOS or
-	 * grub?
-	 *
-	 * If we leave the registers in a conflicting state (e.g. with the
-	 * display plane reading from the other pipe than the one we intend
-	 * to use) then when we attempt to teardown the active mode, we will
-	 * not disable the pipes and planes in the correct order -- leaving
-	 * a plane reading from a disabled pipe and possibly leading to
-	 * undefined behaviour.
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+				  struct drm_crtc *crtc)
+{
+	struct drm_device *dev;
+	struct drm_crtc *tmp;
+	int crtc_mask = 1;
+
+	WARN(!crtc, "checking null crtc?\n");
+
+	dev = crtc->dev;
+
+	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+		if (tmp == crtc)
+			break;
+		crtc_mask <<= 1;
+	}
+
+	if (encoder->possible_crtcs & crtc_mask)
+		return true;
+	return false;
+}
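intel_encoder_crtc_ok() recomputes the crtc's index by walking crtc_list, so bit n of encoder->possible_crtcs corresponds to the n-th crtc in that list. A tiny worked example with made-up values:

/* Hypothetical: an encoder that can drive only the first two crtcs. */
u32 possible_crtcs = 0x3;			/* bits 0 and 1       */
int crtc_index = 2;				/* third crtc in list */
bool ok = possible_crtcs & (1 << crtc_index);	/* 0x3 & 0x4 -> false */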
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->new_encoder =
+			to_intel_encoder(connector->base.encoder);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->new_crtc =
+			to_intel_crtc(encoder->base.crtc);
+	}
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->base.encoder = &connector->new_encoder->base;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->base.crtc = &encoder->new_crtc->base;
+	}
+}
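The new_encoder/new_crtc pointers stage the requested configuration without touching what the rest of DRM sees; only the commit step copies them into the core drm_connector and drm_encoder. A minimal sketch of the pattern for a single connector, built only from the fields this patch introduces (hypothetical helper):

/* Sketch: stage a link, then make it visible in one step. */
static void example_stage_and_commit(struct intel_connector *connector,
				     struct intel_encoder *encoder,
				     struct intel_crtc *crtc)
{
	/* staged: invisible to the rest of the modeset code */
	connector->new_encoder = encoder;
	encoder->new_crtc = crtc;

	/* commit: mirror the staged links into the core structs */
	connector->base.encoder = &encoder->base;
	encoder->base.crtc = &crtc->base;
}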
+
+static struct drm_display_mode *
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_display_mode *adjusted_mode;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct intel_encoder *encoder;
+
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+	if (!adjusted_mode)
+		return ERR_PTR(-ENOMEM);
+
+	/* Pass our mode to the connectors and the CRTC to give them a chance to
+	 * adjust it according to limitations or connector properties, and also
+	 * a chance to reject the mode entirely.
 	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
 
-	reg = DSPCNTR(plane);
-	val = I915_READ(reg);
+		if (&encoder->new_crtc->base != crtc)
+			continue;
+		encoder_funcs = encoder->base.helper_private;
+		if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+						adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
+			goto fail;
+		}
+	}
 
-	if ((val & DISPLAY_PLANE_ENABLE) == 0)
-		return;
-	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
-		return;
+	if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
+		goto fail;
+	}
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
-	/* This display plane is active and attached to the other CPU pipe. */
-	pipe = !pipe;
+	return adjusted_mode;
+fail:
+	drm_mode_destroy(dev, adjusted_mode);
+	return ERR_PTR(-EINVAL);
+}
 
-	/* Disable the plane and wait for it to stop reading from the pipe. */
-	intel_disable_plane(dev_priv, plane, pipe);
-	intel_disable_pipe(dev_priv, pipe);
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+			     unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+	struct intel_crtc *intel_crtc;
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct drm_crtc *tmp_crtc;
+
+	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+	/* Check which crtcs have changed outputs connected to them, these need
+	 * to be part of the prepare_pipes mask. We don't (yet) support global
+	 * modeset across multiple crtcs, so modeset_pipes will only have one
+	 * bit set at most. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->base.encoder == &connector->new_encoder->base)
+			continue;
+
+		if (connector->base.encoder) {
+			tmp_crtc = connector->base.encoder->crtc;
+
+			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+		}
+
+		if (connector->new_encoder)
+			*prepare_pipes |=
+				1 << connector->new_encoder->new_crtc->pipe;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (encoder->base.crtc == &encoder->new_crtc->base)
+			continue;
+
+		if (encoder->base.crtc) {
+			tmp_crtc = encoder->base.crtc;
+
+			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+		}
+
+		if (encoder->new_crtc)
+			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
+	}
+
+	/* Check for any pipes that will be fully disabled ... */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		bool used = false;
+
+		/* Don't try to disable disabled crtcs. */
+		if (!intel_crtc->base.enabled)
+			continue;
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+				    base.head) {
+			if (encoder->new_crtc == intel_crtc)
+				used = true;
+		}
+
+		if (!used)
+			*disable_pipes |= 1 << intel_crtc->pipe;
+	}
+
+
+	/* set_mode is also used to update properties on live display pipes. */
+	intel_crtc = to_intel_crtc(crtc);
+	if (crtc->enabled)
+		*prepare_pipes |= 1 << intel_crtc->pipe;
+
+	/* We only support modeset on a single crtc, hence we need to do that
+	 * only for the passed-in crtc iff we change anything other than just
+	 * disabling crtcs.
+	 *
+	 * This is actually not true, to be fully compatible with the old crtc
+	 * helper we automatically disable _any_ output (i.e. doesn't need to be
+	 * connected to the crtc we're modesetting on) if it's disconnected.
+	 * Which is a rather nutty api (since changing the output configuration
+	 * without userspace's explicit request can lead to confusion), but
+	 * alas. Hence we currently need to modeset on all pipes we prepare. */
+	if (*prepare_pipes)
+		*modeset_pipes = *prepare_pipes;
+
+	/* ... and mask these out. */
+	*modeset_pipes &= ~(*disable_pipes);
+	*prepare_pipes &= ~(*disable_pipes);
 }
 
-static void intel_crtc_reset(struct drm_crtc *crtc)
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
 {
+	struct drm_encoder *encoder;
 	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	/* Reset flags back to the 'unknown' status so that they
-	 * will be correctly set on the initial modeset.
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc)
+			return true;
+
+	return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+	struct intel_encoder *intel_encoder;
+	struct intel_crtc *intel_crtc;
+	struct drm_connector *connector;
+
+	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (!intel_encoder->base.crtc)
+			continue;
+
+		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+		if (prepare_pipes & (1 << intel_crtc->pipe))
+			intel_encoder->connectors_active = false;
+	}
+
+	intel_modeset_commit_output_state(dev);
+
+	/* Update computed state. */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder || !connector->encoder->crtc)
+			continue;
+
+		intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+		if (prepare_pipes & (1 << intel_crtc->pipe)) {
+			struct drm_property *dpms_property =
+				dev->mode_config.dpms_property;
+
+			connector->dpms = DRM_MODE_DPMS_ON;
+			drm_connector_property_set_value(connector,
+							 dpms_property,
+							 DRM_MODE_DPMS_ON);
+
+			intel_encoder = to_intel_encoder(connector->encoder);
+			intel_encoder->connectors_active = true;
+		}
+	}
+
+}
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+	list_for_each_entry((intel_crtc), \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head) \
+		if (mask & (1 <<(intel_crtc)->pipe)) \
+
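A usage sketch for the masked iterator defined above, with a hypothetical pipe mask and assuming dev/dev_priv are in scope as in intel_set_mode() further down:

/* Walk only the crtcs whose pipe bit is set in the mask. */
unsigned prepare_pipes = (1 << PIPE_A) | (1 << PIPE_B);	/* hypothetical */
struct intel_crtc *intel_crtc;

for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
	dev_priv->display.crtc_enable(&intel_crtc->base);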
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		/* This also checks the encoder/connector hw state with the
+		 * ->get_hw_state callbacks. */
+		intel_connector_check_state(connector);
+
+		WARN(&connector->new_encoder->base != connector->base.encoder,
+		     "connector's staged encoder doesn't match current encoder\n");
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		bool enabled = false;
+		bool active = false;
+		enum pipe pipe, tracked_pipe;
+
+		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base));
+
+		WARN(&encoder->new_crtc->base != encoder->base.crtc,
+		     "encoder's staged crtc doesn't match current crtc\n");
+		WARN(encoder->connectors_active && !encoder->base.crtc,
+		     "encoder's connectors_active set, but no crtc\n");
+
+		list_for_each_entry(connector, &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->base.encoder != &encoder->base)
+				continue;
+			enabled = true;
+			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+				active = true;
+		}
+		WARN(!!encoder->base.crtc != enabled,
+		     "encoder's enabled state mismatch "
+		     "(expected %i, found %i)\n",
+		     !!encoder->base.crtc, enabled);
+		WARN(active && !encoder->base.crtc,
+		     "active encoder with no crtc\n");
+
+		WARN(encoder->connectors_active != active,
+		     "encoder's computed active state doesn't match tracked active state "
+		     "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+		active = encoder->get_hw_state(encoder, &pipe);
+		WARN(active != encoder->connectors_active,
+		     "encoder's hw state doesn't match sw tracking "
+		     "(expected %i, found %i)\n",
+		     encoder->connectors_active, active);
+
+		if (!encoder->base.crtc)
+			continue;
+
+		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+		WARN(active && pipe != tracked_pipe,
+		     "active encoder's pipe doesn't match "
+		     "(expected %i, found %i)\n",
+		     tracked_pipe, pipe);
+
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		bool enabled = false;
+		bool active = false;
+
+		DRM_DEBUG_KMS("[CRTC:%d]\n",
+			      crtc->base.base.id);
+
+		WARN(crtc->active && !crtc->base.enabled,
+		     "active crtc, but not enabled in sw tracking\n");
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+				    base.head) {
+			if (encoder->base.crtc != &crtc->base)
+				continue;
+			enabled = true;
+			if (encoder->connectors_active)
+				active = true;
+		}
+		WARN(active != crtc->active,
+		     "crtc's computed active state doesn't match tracked active state "
+		     "(expected %i, found %i)\n", active, crtc->active);
+		WARN(enabled != crtc->base.enabled,
+		     "crtc's computed enabled state doesn't match tracked enabled state "
+		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+		assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
+	}
+}
+
+bool intel_set_mode(struct drm_crtc *crtc,
+		    struct drm_display_mode *mode,
+		    int x, int y, struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_encoder *encoder;
+	struct intel_crtc *intel_crtc;
+	unsigned disable_pipes, prepare_pipes, modeset_pipes;
+	bool ret = true;
+
+	intel_modeset_affected_pipes(crtc, &modeset_pipes,
+				     &prepare_pipes, &disable_pipes);
+
+	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+		      modeset_pipes, prepare_pipes, disable_pipes);
+
+	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+		intel_crtc_disable(&intel_crtc->base);
+
+	saved_hwmode = crtc->hwmode;
+	saved_mode = crtc->mode;
+
+	/* Hack: Because we don't (yet) support global modeset on multiple
+	 * crtcs, we don't keep track of the new mode for more than one crtc.
+	 * Hence simply check whether any bit is set in modeset_pipes in all the
+	 * pieces of code that are not yet converted to deal with multiple crtcs
+	 * changing their mode at the same time. */
+	adjusted_mode = NULL;
+	if (modeset_pipes) {
+		adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
+		if (IS_ERR(adjusted_mode)) {
+			return false;
+		}
+	}
+
+	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+		if (intel_crtc->base.enabled)
+			dev_priv->display.crtc_disable(&intel_crtc->base);
+	}
+
+	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
+	 * to set it here already, even though we pass it down the callchain.
 	 */
-	intel_crtc->dpms_mode = -1;
+	if (modeset_pipes)
+		crtc->mode = *mode;
 
-	/* We need to fix up any BIOS configuration that conflicts with
-	 * our expectations.
+	/* Only after disabling all output pipelines that will be changed can we
+	 * update the output configuration. */
+	intel_modeset_update_state(dev, prepare_pipes);
+
+	/* Set up the DPLL and any encoders state that needs to adjust or depend
+	 * on the DPLL.
 	 */
-	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+		ret = !intel_crtc_mode_set(&intel_crtc->base,
+					   mode, adjusted_mode,
+					   x, y, fb);
+		if (!ret)
+			goto done;
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+			if (encoder->crtc != &intel_crtc->base)
+				continue;
+
+			DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+				encoder->base.id, drm_get_encoder_name(encoder),
+				mode->base.id, mode->name);
+			encoder_funcs = encoder->helper_private;
+			encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+		}
+	}
+
+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+		dev_priv->display.crtc_enable(&intel_crtc->base);
+
+	if (modeset_pipes) {
+		/* Store real post-adjustment hardware mode. */
+		crtc->hwmode = *adjusted_mode;
+
+		/* Calculate and store various constants which
+		 * are later needed by vblank and swap-completion
+		 * timestamping. They are derived from true hwmode.
+		 */
+		drm_calc_timestamping_constants(crtc);
+	}
+
+	/* FIXME: add subpixel order */
+done:
+	drm_mode_destroy(dev, adjusted_mode);
+	if (!ret && crtc->enabled) {
+		crtc->hwmode = saved_hwmode;
+		crtc->mode = saved_mode;
+	} else {
+		intel_modeset_check_state(dev);
+	}
+
+	return ret;
 }
 
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
-	.dpms = intel_crtc_dpms,
-	.mode_fixup = intel_crtc_mode_fixup,
-	.mode_set = intel_crtc_mode_set,
-	.mode_set_base = intel_pipe_set_base,
-	.mode_set_base_atomic = intel_pipe_set_base_atomic,
-	.load_lut = intel_crtc_load_lut,
-	.disable = intel_crtc_disable,
-};
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+	if (!config)
+		return;
+
+	kfree(config->save_connector_encoders);
+	kfree(config->save_encoder_crtcs);
+	kfree(config);
+}
+
+static int intel_set_config_save_state(struct drm_device *dev,
+				       struct intel_set_config *config)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int count;
+
+	config->save_encoder_crtcs =
+		kcalloc(dev->mode_config.num_encoder,
+			sizeof(struct drm_crtc *), GFP_KERNEL);
+	if (!config->save_encoder_crtcs)
+		return -ENOMEM;
+
+	config->save_connector_encoders =
+		kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_encoder *), GFP_KERNEL);
+	if (!config->save_connector_encoders)
+		return -ENOMEM;
+
+	/* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen, only the expected state is
+	 * restored, not the driver's personal bookkeeping.
+	 */
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		config->save_encoder_crtcs[count++] = encoder->crtc;
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		config->save_connector_encoders[count++] = connector->encoder;
+	}
+
+	return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+					   struct intel_set_config *config)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	int count;
+
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+		encoder->new_crtc =
+			to_intel_crtc(config->save_encoder_crtcs[count++]);
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+		connector->new_encoder =
+			to_intel_encoder(config->save_connector_encoders[count++]);
+	}
+}
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+				      struct intel_set_config *config)
+{
+
+	/* We should be able to check here if the fb has the same properties
+	 * and then just flip_or_move it */
+	if (set->crtc->fb != set->fb) {
+		/* If we have no fb then treat it as a full mode set */
+		if (set->crtc->fb == NULL) {
+			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+			config->mode_changed = true;
+		} else if (set->fb == NULL) {
+			config->mode_changed = true;
+		} else if (set->fb->depth != set->crtc->fb->depth) {
+			config->mode_changed = true;
+		} else if (set->fb->bits_per_pixel !=
+			   set->crtc->fb->bits_per_pixel) {
+			config->mode_changed = true;
+		} else
+			config->fb_changed = true;
+	}
+
+	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+		config->fb_changed = true;
+
+	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+		DRM_DEBUG_KMS("modes are different, full mode set\n");
+		drm_mode_debug_printmodeline(&set->crtc->mode);
+		drm_mode_debug_printmodeline(set->mode);
+		config->mode_changed = true;
+	}
+}
+
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+				 struct drm_mode_set *set,
+				 struct intel_set_config *config)
+{
+	struct drm_crtc *new_crtc;
+	struct intel_connector *connector;
+	struct intel_encoder *encoder;
+	int count, ro;
+
+	/* The upper layers ensure that we either disable a crtc or have a list
+	 * of connectors. For paranoia, double-check this. */
+	WARN_ON(!set->fb && (set->num_connectors != 0));
+	WARN_ON(set->fb && (set->num_connectors == 0));
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		/* Otherwise traverse passed in connector list and get encoders
+		 * for them. */
+		for (ro = 0; ro < set->num_connectors; ro++) {
+			if (set->connectors[ro] == &connector->base) {
+				connector->new_encoder = connector->encoder;
+				break;
+			}
+		}
+
+		/* If we disable the crtc, disable all its connectors. Also, if
+		 * the connector is on the changing crtc but not on the new
+		 * connector list, disable it. */
+		if ((!set->fb || ro == set->num_connectors) &&
+		    connector->base.encoder &&
+		    connector->base.encoder->crtc == set->crtc) {
+			connector->new_encoder = NULL;
+
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+				connector->base.base.id,
+				drm_get_connector_name(&connector->base));
+		}
+
+		if (&connector->new_encoder->base != connector->base.encoder) {
+			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+			config->mode_changed = true;
+		}
+
+		/* Disable all disconnected encoders. */
+		if (connector->base.status == connector_status_disconnected)
+			connector->new_encoder = NULL;
+	}
+	/* connector->new_encoder is now updated for all connectors. */
+
+	/* Update crtc of enabled connectors. */
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (!connector->new_encoder)
+			continue;
+
+		new_crtc = connector->new_encoder->base.crtc;
+
+		for (ro = 0; ro < set->num_connectors; ro++) {
+			if (set->connectors[ro] == &connector->base)
+				new_crtc = set->crtc;
+		}
+
+		/* Make sure the new CRTC will work with the encoder */
+		if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+					   new_crtc)) {
+			return -EINVAL;
+		}
+		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+			connector->base.base.id,
+			drm_get_connector_name(&connector->base),
+			new_crtc->base.id);
+	}
+
+	/* Check for any encoders that need to be disabled. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		list_for_each_entry(connector,
+				    &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->new_encoder == encoder) {
+				WARN_ON(!connector->new_encoder->new_crtc);
+
+				goto next_encoder;
+			}
+		}
+		encoder->new_crtc = NULL;
+next_encoder:
+		/* Only now check for crtc changes so we don't miss encoders
+		 * that will be disabled. */
+		if (&encoder->new_crtc->base != encoder->base.crtc) {
+			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+			config->mode_changed = true;
+		}
+	}
+	/* Now we've also updated encoder->new_crtc for all encoders. */
+
+	return 0;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct drm_mode_set save_set;
+	struct intel_set_config *config;
+	int ret;
+
+	BUG_ON(!set);
+	BUG_ON(!set->crtc);
+	BUG_ON(!set->crtc->helper_private);
+
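+	/* A modeset request without a mode means disable, so drop the fb. */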
+	if (!set->mode)
+		set->fb = NULL;
+
+	/* The fb helper likes to play gross jokes with ->mode_set_config.
+	 * Unfortunately the crtc helper doesn't do much at all for this case,
+	 * so we have to cope with this madness until the fb helper is fixed up. */
+	if (set->fb && set->num_connectors == 0)
+		return 0;
+
+	if (set->fb) {
+		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+				set->crtc->base.id, set->fb->base.id,
+				(int)set->num_connectors, set->x, set->y);
+	} else {
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+	}
+
+	dev = set->crtc->dev;
+
+	ret = -ENOMEM;
+	config = kzalloc(sizeof(*config), GFP_KERNEL);
+	if (!config)
+		goto out_config;
+
+	ret = intel_set_config_save_state(dev, config);
+	if (ret)
+		goto out_config;
+
+	save_set.crtc = set->crtc;
+	save_set.mode = &set->crtc->mode;
+	save_set.x = set->crtc->x;
+	save_set.y = set->crtc->y;
+	save_set.fb = set->crtc->fb;
+
+	/* Compute whether we need a full modeset, only an fb base update or no
+	 * change at all. In the future we might also check whether only the
+	 * mode changed, e.g. for LVDS where we only change the panel fitter in
+	 * such cases. */
+	intel_set_config_compute_mode_changes(set, config);
+
+	ret = intel_modeset_stage_output_state(dev, set, config);
+	if (ret)
+		goto fail;
+
+	if (config->mode_changed) {
+		if (set->mode) {
+			DRM_DEBUG_KMS("attempting to set mode from"
+					" userspace\n");
+			drm_mode_debug_printmodeline(set->mode);
+		}
+
+		if (!intel_set_mode(set->crtc, set->mode,
+				    set->x, set->y, set->fb)) {
+			DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+				  set->crtc->base.id);
+			ret = -EINVAL;
+			goto fail;
+		}
+	} else if (config->fb_changed) {
+		ret = intel_pipe_set_base(set->crtc,
+					  set->x, set->y, set->fb);
+	}
+
+	intel_set_config_free(config);
+
+	return 0;
+
+fail:
+	intel_set_config_restore_state(dev, config);
+
+	/* Try to restore the config */
+	if (config->mode_changed &&
+	    !intel_set_mode(save_set.crtc, save_set.mode,
+			    save_set.x, save_set.y, save_set.fb))
+		DRM_ERROR("failed to restore config after modeset failure\n");
+
+out_config:
+	intel_set_config_free(config);
+	return ret;
+}
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
-	.reset = intel_crtc_reset,
 	.cursor_set = intel_crtc_cursor_set,
 	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = intel_crtc_set_config,
 	.destroy = intel_crtc_destroy,
 	.page_flip = intel_crtc_page_flip,
 };
@@ -6655,24 +7448,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	intel_crtc_reset(&intel_crtc->base);
-	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 	intel_crtc->bpp = 24; /* default for pre-Ironlake */
 
-	if (HAS_PCH_SPLIT(dev)) {
-		intel_helper_funcs.prepare = ironlake_crtc_prepare;
-		intel_helper_funcs.commit = ironlake_crtc_commit;
-	} else {
-		intel_helper_funcs.prepare = i9xx_crtc_prepare;
-		intel_helper_funcs.commit = i9xx_crtc_commit;
-	}
-
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
-	intel_crtc->busy = false;
-
-	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
-		    (unsigned long)intel_crtc);
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6699,15 +7477,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
 {
-	struct intel_encoder *encoder;
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_encoder *source_encoder;
 	int index_mask = 0;
 	int entry = 0;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
-		if (type_mask & encoder->clone_mask)
+	list_for_each_entry(source_encoder,
+			    &dev->mode_config.encoder_list, base.head) {
+
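+		/* An encoder can always be cloned with itself. */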
+		if (encoder == source_encoder)
 			index_mask |= (1 << entry);
+
+		/* Intel hw has only one MUX where encoders could be cloned. */
+		if (encoder->cloneable && source_encoder->cloneable)
+			index_mask |= (1 << entry);
+
 		entry++;
 	}
 
@@ -6748,10 +7534,10 @@ static void intel_setup_outputs(struct drm_device *dev)
 		dpd_is_edp = intel_dpd_is_edp(dev);
 
 		if (has_edp_a(dev))
-			intel_dp_init(dev, DP_A);
+			intel_dp_init(dev, DP_A, PORT_A);
 
 		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_D);
+			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	}
 
 	intel_crt_init(dev);
@@ -6782,22 +7568,22 @@ static void intel_setup_outputs(struct drm_device *dev)
 			/* PCH SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev, PCH_SDVOB, true);
 			if (!found)
-				intel_hdmi_init(dev, HDMIB);
+				intel_hdmi_init(dev, HDMIB, PORT_B);
 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
-				intel_dp_init(dev, PCH_DP_B);
+				intel_dp_init(dev, PCH_DP_B, PORT_B);
 		}
 
 		if (I915_READ(HDMIC) & PORT_DETECTED)
-			intel_hdmi_init(dev, HDMIC);
+			intel_hdmi_init(dev, HDMIC, PORT_C);
 
 		if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
-			intel_hdmi_init(dev, HDMID);
+			intel_hdmi_init(dev, HDMID, PORT_D);
 
 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
-			intel_dp_init(dev, PCH_DP_C);
+			intel_dp_init(dev, PCH_DP_C, PORT_C);
 
 		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_D);
+			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
 		int found;
 
@@ -6805,17 +7591,17 @@ static void intel_setup_outputs(struct drm_device *dev)
 			/* SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev, SDVOB, true);
 			if (!found)
-				intel_hdmi_init(dev, SDVOB);
+				intel_hdmi_init(dev, SDVOB, PORT_B);
 			if (!found && (I915_READ(DP_B) & DP_DETECTED))
-				intel_dp_init(dev, DP_B);
+				intel_dp_init(dev, DP_B, PORT_B);
 		}
 
 		if (I915_READ(SDVOC) & PORT_DETECTED)
-			intel_hdmi_init(dev, SDVOC);
+			intel_hdmi_init(dev, SDVOC, PORT_C);
 
 		/* Shares lanes with HDMI on SDVOC */
 		if (I915_READ(DP_C) & DP_DETECTED)
-			intel_dp_init(dev, DP_C);
+			intel_dp_init(dev, DP_C, PORT_C);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;
 
@@ -6824,12 +7610,12 @@ static void intel_setup_outputs(struct drm_device *dev)
 			found = intel_sdvo_init(dev, SDVOB, true);
 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
-				intel_hdmi_init(dev, SDVOB);
+				intel_hdmi_init(dev, SDVOB, PORT_B);
 			}
 
 			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
 				DRM_DEBUG_KMS("probing DP_B\n");
-				intel_dp_init(dev, DP_B);
+				intel_dp_init(dev, DP_B, PORT_B);
 			}
 		}
 
@@ -6844,18 +7630,18 @@ static void intel_setup_outputs(struct drm_device *dev)
 
 			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
-				intel_hdmi_init(dev, SDVOC);
+				intel_hdmi_init(dev, SDVOC, PORT_C);
 			}
 			if (SUPPORTS_INTEGRATED_DP(dev)) {
 				DRM_DEBUG_KMS("probing DP_C\n");
-				intel_dp_init(dev, DP_C);
+				intel_dp_init(dev, DP_C, PORT_C);
 			}
 		}
 
 		if (SUPPORTS_INTEGRATED_DP(dev) &&
 		    (I915_READ(DP_D) & DP_DETECTED)) {
 			DRM_DEBUG_KMS("probing DP_D\n");
-			intel_dp_init(dev, DP_D);
+			intel_dp_init(dev, DP_D, PORT_D);
 		}
 	} else if (IS_GEN2(dev))
 		intel_dvo_init(dev);
@@ -6866,12 +7652,9 @@ static void intel_setup_outputs(struct drm_device *dev)
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 		encoder->base.possible_crtcs = encoder->crtc_mask;
 		encoder->base.possible_clones =
-			intel_encoder_clones(dev, encoder->clone_mask);
+			intel_encoder_clones(encoder);
 	}
 
-	/* disable all the possible outputs/crtcs before entering KMS mode */
-	drm_helper_disable_unused_functions(dev);
-
 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 		ironlake_init_pch_refclk(dev);
 }
@@ -6973,13 +7756,15 @@ static void intel_init_display(struct drm_device *dev)
 
 	/* We always want a DPMS function */
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->display.dpms = ironlake_crtc_dpms;
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+		dev_priv->display.crtc_enable = ironlake_crtc_enable;
+		dev_priv->display.crtc_disable = ironlake_crtc_disable;
 		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else {
-		dev_priv->display.dpms = i9xx_crtc_dpms;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+		dev_priv->display.crtc_enable = i9xx_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
 		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_plane = i9xx_update_plane;
 	}
@@ -7023,7 +7808,7 @@ static void intel_init_display(struct drm_device *dev)
 			dev_priv->display.write_eld = ironlake_write_eld;
 		} else if (IS_HASWELL(dev)) {
 			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-			dev_priv->display.write_eld = ironlake_write_eld;
+			dev_priv->display.write_eld = haswell_write_eld;
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_G4X(dev)) {
@@ -7101,21 +7886,16 @@ static struct intel_quirk intel_quirks[] = {
 	/* HP Mini needs pipe A force quirk (LP: #322104) */
 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
 
-	/* Thinkpad R31 needs pipe A force quirk */
-	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
 	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
 
-	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
-	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
-	/* ThinkPad X40 needs pipe A force quirk */
-
 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
 	/* 855 & before need to leave pipe A & dpll A up */
 	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
 	/* Lenovo U160 cannot use SSC on LVDS */
 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -7231,10 +8011,251 @@ void intel_modeset_init(struct drm_device *dev)
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
 	intel_setup_outputs(dev);
+}
 
-	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
-	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
-		    (unsigned long)dev);
+static void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
+	connector->base.dpms = DRM_MODE_DPMS_OFF;
+	connector->base.encoder = NULL;
+	connector->encoder->connectors_active = false;
+	connector->encoder->base.crtc = NULL;
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+	struct drm_connector *crt = NULL;
+	struct intel_load_detect_pipe load_detect_temp;
+
+	/* We can't just switch on pipe A; we need to set things up with a
+	 * proper mode and output configuration. As a gross hack, enable pipe A
+	 * by enabling the load detect pipe once. */
+	list_for_each_entry(connector,
+			    &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+			crt = &connector->base;
+			break;
+		}
+	}
+
+	if (!crt)
+		return;
+
+	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+		intel_release_load_detect_pipe(crt, &load_detect_temp);
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg, val;
+
+	/* Clear any frame start delays used for debugging left by the BIOS */
+	reg = PIPECONF(crtc->pipe);
+	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+	/* We need to sanitize the plane -> pipe mapping first because this will
+	 * disable the crtc (and hence change the state) if it is wrong. */
+	if (!HAS_PCH_SPLIT(dev)) {
+		struct intel_connector *connector;
+		bool plane;
+
+		reg = DSPCNTR(crtc->plane);
+		val = I915_READ(reg);
+
+		if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
+		    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+			goto ok;
+
+		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+			      crtc->base.base.id);
+
+		/* Pipe has the wrong plane attached and the plane is active.
+		 * Temporarily change the plane mapping and disable everything
+		 * ...  */
+		plane = crtc->plane;
+		crtc->plane = !plane;
+		dev_priv->display.crtc_disable(&crtc->base);
+		crtc->plane = plane;
+
+		/* ... and break all links. */
+		list_for_each_entry(connector, &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->encoder->base.crtc != &crtc->base)
+				continue;
+
+			intel_connector_break_all_links(connector);
+		}
+
+		WARN_ON(crtc->active);
+		crtc->base.enabled = false;
+	}
+ok:
+
+	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+	    crtc->pipe == PIPE_A && !crtc->active) {
+		/* BIOS forgot to enable pipe A, this mostly happens after
+		 * resume. Force-enable the pipe to fix this; the update_dpms
+		 * call below will restore the pipe to the right state, but
+		 * leave the required bits on. */
+		intel_enable_pipe_a(dev);
+	}
+
+	/* Adjust the state of the output pipe according to whether we
+	 * have active connectors/encoders. */
+	intel_crtc_update_dpms(&crtc->base);
+
+	if (crtc->active != crtc->base.enabled) {
+		struct intel_encoder *encoder;
+
+		/* This can happen either due to bugs in the get_hw_state
+		 * functions or because the pipe is force-enabled due to the
+		 * pipe A quirk. */
+		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+			      crtc->base.base.id,
+			      crtc->base.enabled ? "enabled" : "disabled",
+			      crtc->active ? "enabled" : "disabled");
+
+		crtc->base.enabled = crtc->active;
+
+		/* Because we only establish the connector -> encoder ->
+		 * crtc links if something is active, this means the
+		 * crtc is now deactivated. Break the links. Connector
+		 * -> encoder links are only established when things are
+		 * actually up, hence no need to break them. */
+		WARN_ON(crtc->active);
+
+		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+			WARN_ON(encoder->connectors_active);
+			encoder->base.crtc = NULL;
+		}
+	}
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+	struct intel_connector *connector;
+	struct drm_device *dev = encoder->base.dev;
+
+	/* We need to check both for a crtc link (meaning that the
+	 * encoder is active and trying to read from a pipe) and the
+	 * pipe itself being active. */
+	bool has_active_crtc = encoder->base.crtc &&
+		to_intel_crtc(encoder->base.crtc)->active;
+
+	if (encoder->connectors_active && !has_active_crtc) {
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base));
+
+		/* The encoder has active connectors but no active pipe. This
+		 * is fallout from our resume register restoring. Disable
+		 * the encoder manually again. */
+		if (encoder->base.crtc) {
+			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+				      encoder->base.base.id,
+				      drm_get_encoder_name(&encoder->base));
+			encoder->disable(encoder);
+		}
+
+		/* Inconsistent output/port/pipe state happens presumably due to
+		 * a bug in one of the get_hw_state functions. Or someplace else
+		 * in our code, like the register restore mess on resume. Clamp
+		 * things to off as a safer default. */
+		list_for_each_entry(connector,
+				    &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->encoder != encoder)
+				continue;
+
+			intel_connector_break_all_links(connector);
+		}
+	}
+	/* Enabled encoders without active connectors will be fixed in
+	 * the crtc fixup. */
+}
+
+/* Scans out the current hw modeset state, sanitizes it and maps it into the
+ * drm and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	u32 tmp;
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
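+	/* First read out the hw state of pipes, encoders and connectors. */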
+	for_each_pipe(pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+		tmp = I915_READ(PIPECONF(pipe));
+		if (tmp & PIPECONF_ENABLE)
+			crtc->active = true;
+		else
+			crtc->active = false;
+
+		crtc->base.enabled = crtc->active;
+
+		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+			      crtc->base.base.id,
+			      crtc->active ? "enabled" : "disabled");
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		pipe = 0;
+
+		if (encoder->get_hw_state(encoder, &pipe)) {
+			encoder->base.crtc =
+				dev_priv->pipe_to_crtc_mapping[pipe];
+		} else {
+			encoder->base.crtc = NULL;
+		}
+
+		encoder->connectors_active = false;
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base),
+			      encoder->base.crtc ? "enabled" : "disabled",
+			      pipe);
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->get_hw_state(connector)) {
+			connector->base.dpms = DRM_MODE_DPMS_ON;
+			connector->encoder->connectors_active = true;
+			connector->base.encoder = &connector->encoder->base;
+		} else {
+			connector->base.dpms = DRM_MODE_DPMS_OFF;
+			connector->base.encoder = NULL;
+		}
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+			      connector->base.base.id,
+			      drm_get_connector_name(&connector->base),
+			      connector->base.encoder ? "enabled" : "disabled");
+	}
+
+	/* HW state is read out, now we need to sanitize this mess. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		intel_sanitize_encoder(encoder);
+	}
+
+	for_each_pipe(pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		intel_sanitize_crtc(crtc);
+	}
+
+	intel_modeset_update_staged_output_state(dev);
+
+	intel_modeset_check_state(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -7242,6 +8263,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
 	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev);
+
+	intel_modeset_setup_hw_state(dev);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -7280,19 +8303,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * enqueue unpin/hotplug work. */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_work_sync(&dev_priv->rps_work);
+	cancel_work_sync(&dev_priv->rps.work);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
 
-	/* Shut off idle work before the crtcs get freed. */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		intel_crtc = to_intel_crtc(crtc);
-		del_timer_sync(&intel_crtc->idle_timer);
-	}
-	del_timer_sync(&dev_priv->idle_timer);
-	cancel_work_sync(&dev_priv->idle_work);
-
 	drm_mode_config_cleanup(dev);
 }
 
@@ -7338,7 +8353,7 @@ struct intel_display_error_state {
 		u32 position;
 		u32 base;
 		u32 size;
-	} cursor[2];
+	} cursor[I915_MAX_PIPES];
 
 	struct intel_pipe_error_state {
 		u32 conf;
@@ -7350,7 +8365,7 @@ struct intel_display_error_state {
 		u32 vtotal;
 		u32 vblank;
 		u32 vsync;
-	} pipe[2];
+	} pipe[I915_MAX_PIPES];
 
 	struct intel_plane_error_state {
 		u32 control;
@@ -7360,7 +8375,7 @@ struct intel_display_error_state {
 		u32 addr;
 		u32 surface;
 		u32 tile_offset;
-	} plane[2];
+	} plane[I915_MAX_PIPES];
 };
 
 struct intel_display_error_state *
@@ -7374,7 +8389,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 	if (error == NULL)
 		return NULL;
 
-	for (i = 0; i < 2; i++) {
+	for_each_pipe(i) {
 		error->cursor[i].control = I915_READ(CURCNTR(i));
 		error->cursor[i].position = I915_READ(CURPOS(i));
 		error->cursor[i].base = I915_READ(CURBASE(i));
@@ -7407,9 +8422,11 @@ intel_display_print_error_state(struct seq_file *m,
 				struct drm_device *dev,
 				struct intel_display_error_state *error)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
 
-	for (i = 0; i < 2; i++) {
+	seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+	for_each_pipe(i) {
 		seq_printf(m, "Pipe [%d]:\n", i);
 		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
 		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f1bd4f4cd667..6c8746c030c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -35,42 +35,10 @@
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
-#include <drm/drm_dp_helper.h>
 
-#define DP_RECEIVER_CAP_SIZE	0xf
 #define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
-#define DP_LINK_CONFIGURATION_SIZE	9
-
-struct intel_dp {
-	struct intel_encoder base;
-	uint32_t output_reg;
-	uint32_t DP;
-	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
-	bool has_audio;
-	enum hdmi_force_audio force_audio;
-	uint32_t color_range;
-	int dpms_mode;
-	uint8_t link_bw;
-	uint8_t lane_count;
-	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
-	struct i2c_adapter adapter;
-	struct i2c_algo_dp_aux_data algo;
-	bool is_pch_edp;
-	uint8_t	train_set[4];
-	int panel_power_up_delay;
-	int panel_power_down_delay;
-	int panel_power_cycle_delay;
-	int backlight_on_delay;
-	int backlight_off_delay;
-	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
-	struct delayed_work panel_vdd_work;
-	bool want_panel_vdd;
-	struct edid *edid; /* cached EDID for eDP */
-	int edid_mode_count;
-};
-
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -839,9 +807,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	}
 }
 
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
 static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
@@ -852,14 +817,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	/* Turn on the eDP PLL if needed */
-	if (is_edp(intel_dp)) {
-		if (!is_pch_edp(intel_dp))
-			ironlake_edp_pll_on(encoder);
-		else
-			ironlake_edp_pll_off(encoder);
-	}
-
 	/*
 	 * There are four kinds of DP registers:
 	 *
@@ -881,10 +838,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	 * supposed to be read-only.
 	 */
 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
-	intel_dp->DP |=  DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
 	/* Handle DP bits in common between all three register formats */
-
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
 	switch (intel_dp->lane_count) {
@@ -931,7 +886,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		intel_dp->DP |= intel_crtc->pipe << 29;
 
 		/* don't miss out required setting for eDP */
-		intel_dp->DP |= DP_PLL_ENABLE;
 		if (adjusted_mode->clock < 200000)
 			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 		else
@@ -953,7 +907,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 		if (is_cpu_edp(intel_dp)) {
 			/* don't miss out required setting for eDP */
-			intel_dp->DP |= DP_PLL_ENABLE;
 			if (adjusted_mode->clock < 200000)
 				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 			else
@@ -1224,27 +1177,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 	msleep(intel_dp->backlight_off_delay);
 }
 
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
 	DRM_DEBUG_KMS("\n");
 	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl |= DP_PLL_ENABLE;
-	I915_WRITE(DP_A, dpa_ctl);
+	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We don't adjust intel_dp->DP while tearing down the link, to
+	 * facilitate link retraining (e.g. after hotplug). Hence clear all
+	 * enable bits here to ensure that we don't enable too much. */
+	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+	intel_dp->DP |= DP_PLL_ENABLE;
+	I915_WRITE(DP_A, intel_dp->DP);
 	POSTING_READ(DP_A);
 	udelay(200);
 }
 
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
 	dpa_ctl = I915_READ(DP_A);
+	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+	     "dp pll off, should be on\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We can't rely on the value tracked for the DP register in
+	 * intel_dp->DP because link_down must not change that (otherwise link
+	 * re-training will fail). */
 	dpa_ctl &= ~DP_PLL_ENABLE;
 	I915_WRITE(DP_A, dpa_ctl);
 	POSTING_READ(DP_A);
@@ -1281,10 +1256,57 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 	}
 }
 
-static void intel_dp_prepare(struct drm_encoder *encoder)
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+				  enum pipe *pipe)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ(intel_dp->output_reg);
 
+	if (!(tmp & DP_PORT_EN))
+		return false;
+
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+		*pipe = PORT_TO_PIPE(tmp);
+	} else {
+		u32 trans_sel;
+		u32 trans_dp;
+		int i;
+
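+		/* On CPT the pipe isn't encoded in the port register, so find
+		 * it by matching the transcoder's DP port select instead. */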
+		switch (intel_dp->output_reg) {
+		case PCH_DP_B:
+			trans_sel = TRANS_DP_PORT_SEL_B;
+			break;
+		case PCH_DP_C:
+			trans_sel = TRANS_DP_PORT_SEL_C;
+			break;
+		case PCH_DP_D:
+			trans_sel = TRANS_DP_PORT_SEL_D;
+			break;
+		default:
+			return true;
+		}
+
+		for_each_pipe(i) {
+			trans_dp = I915_READ(TRANS_DP_CTL(i));
+			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+				*pipe = i;
+				return true;
+			}
+		}
+	}
+
+	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+
+	return true;
+}
+
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
@@ -1292,14 +1314,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 	ironlake_edp_backlight_off(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	ironlake_edp_panel_off(intel_dp);
-	intel_dp_link_down(intel_dp);
+
+	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
+	if (!is_cpu_edp(intel_dp))
+		intel_dp_link_down(intel_dp);
 }
 
-static void intel_dp_commit(struct drm_encoder *encoder)
+static void intel_post_disable_dp(struct intel_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
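+	/* The port and PLL may only be torn down after the pipe is off. */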
+	if (is_cpu_edp(intel_dp)) {
+		intel_dp_link_down(intel_dp);
+		ironlake_edp_pll_off(intel_dp);
+	}
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+	if (WARN_ON(dp_reg & DP_PORT_EN))
+		return;
 
 	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -1308,47 +1347,14 @@ static void intel_dp_commit(struct drm_encoder *encoder)
 	ironlake_edp_panel_vdd_off(intel_dp, true);
 	intel_dp_complete_link_train(intel_dp);
 	ironlake_edp_backlight_on(intel_dp);
-
-	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
-	if (HAS_PCH_CPT(dev))
-		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
 }
 
-static void
-intel_dp_dpms(struct drm_encoder *encoder, int mode)
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-	if (mode != DRM_MODE_DPMS_ON) {
-		/* Switching the panel off requires vdd. */
-		ironlake_edp_panel_vdd_on(intel_dp);
-		ironlake_edp_backlight_off(intel_dp);
-		intel_dp_sink_dpms(intel_dp, mode);
-		ironlake_edp_panel_off(intel_dp);
-		intel_dp_link_down(intel_dp);
-
-		if (is_cpu_edp(intel_dp))
-			ironlake_edp_pll_off(encoder);
-	} else {
-		if (is_cpu_edp(intel_dp))
-			ironlake_edp_pll_on(encoder);
-
-		ironlake_edp_panel_vdd_on(intel_dp);
-		intel_dp_sink_dpms(intel_dp, mode);
-		if (!(dp_reg & DP_PORT_EN)) {
-			intel_dp_start_link_train(intel_dp);
-			ironlake_edp_panel_on(intel_dp);
-			ironlake_edp_panel_vdd_off(intel_dp, true);
-			intel_dp_complete_link_train(intel_dp);
-		} else
-			ironlake_edp_panel_vdd_off(intel_dp, false);
-		ironlake_edp_backlight_on(intel_dp);
-	}
-	intel_dp->dpms_mode = mode;
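+	/* The CPU eDP PLL must be running before the pipe is enabled. */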
+	if (is_cpu_edp(intel_dp))
+		ironlake_edp_pll_on(intel_dp);
 }
 
 /*
@@ -1667,6 +1673,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
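+		/* CPT uses a different bit layout for the training pattern. */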
+		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		}
+
+	} else {
+		dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			dp_reg_value |= DP_LINK_TRAIN_OFF;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			break;
+		}
+	}
+
 	I915_WRITE(intel_dp->output_reg, dp_reg_value);
 	POSTING_READ(intel_dp->output_reg);
 
@@ -1674,12 +1719,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 				    DP_TRAINING_PATTERN_SET,
 				    dp_train_pat);
 
-	ret = intel_dp_aux_native_write(intel_dp,
-					DP_TRAINING_LANE0_SET,
-					intel_dp->train_set,
-					intel_dp->lane_count);
-	if (ret != intel_dp->lane_count)
-		return false;
+	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+	    DP_TRAINING_PATTERN_DISABLE) {
+		ret = intel_dp_aux_native_write(intel_dp,
+						DP_TRAINING_LANE0_SET,
+						intel_dp->train_set,
+						intel_dp->lane_count);
+		if (ret != intel_dp->lane_count)
+			return false;
+	}
 
 	return true;
 }
@@ -1689,26 +1737,12 @@ static void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
 	int i;
 	uint8_t voltage;
 	bool clock_recovery = false;
 	int voltage_tries, loop_tries;
-	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
-	/*
-	 * On CPT we have to enable the port in training pattern 1, which
-	 * will happen below in intel_dp_set_link_train.  Otherwise, enable
-	 * the port and wait for it to become active.
-	 */
-	if (!HAS_PCH_CPT(dev)) {
-		I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-		POSTING_READ(intel_dp->output_reg);
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
-	}
-
 	/* Write the link configuration data */
 	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
 				  intel_dp->link_configuration,
@@ -1716,10 +1750,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 
 	DP |= DP_PORT_EN;
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-		DP &= ~DP_LINK_TRAIN_MASK_CPT;
-	else
-		DP &= ~DP_LINK_TRAIN_MASK;
 	memset(intel_dp->train_set, 0, 4);
 	voltage = 0xff;
 	voltage_tries = 0;
@@ -1743,12 +1773,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
-		else
-			reg = DP | DP_LINK_TRAIN_PAT_1;
-
-		if (!intel_dp_set_link_train(intel_dp, reg,
+		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_1 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
@@ -1803,10 +1828,8 @@ static void
 intel_dp_complete_link_train(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool channel_eq = false;
 	int tries, cr_tries;
-	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
@@ -1835,13 +1858,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
-		else
-			reg = DP | DP_LINK_TRAIN_PAT_2;
-
 		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_dp, reg,
+		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_2 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
@@ -1876,15 +1894,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		++tries;
 	}
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-		reg = DP | DP_LINK_TRAIN_OFF_CPT;
-	else
-		reg = DP | DP_LINK_TRAIN_OFF;
-
-	I915_WRITE(intel_dp->output_reg, reg);
-	POSTING_READ(intel_dp->output_reg);
-	intel_dp_aux_native_write_1(intel_dp,
-				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -1894,18 +1904,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t DP = intel_dp->DP;
 
-	if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
 		return;
 
 	DRM_DEBUG_KMS("\n");
 
-	if (is_edp(intel_dp)) {
-		DP &= ~DP_PLL_ENABLE;
-		I915_WRITE(intel_dp->output_reg, DP);
-		POSTING_READ(intel_dp->output_reg);
-		udelay(100);
-	}
-
 	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1917,13 +1920,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
 	msleep(17);
 
-	if (is_edp(intel_dp)) {
-		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
-			DP |= DP_LINK_TRAIN_OFF_CPT;
-		else
-			DP |= DP_LINK_TRAIN_OFF;
-	}
-
 	if (HAS_PCH_IBX(dev) &&
 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
 		struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2032,10 +2028,10 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 	u8 sink_irq_vector;
 	u8 link_status[DP_LINK_STATUS_SIZE];
 
-	if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+	if (!intel_dp->base.connectors_active)
 		return;
 
-	if (!intel_dp->base.base.crtc)
+	if (WARN_ON(!intel_dp->base.base.crtc))
 		return;
 
 	/* Try to read receiver status if the link appears to be up */
@@ -2159,7 +2155,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
 		ret = drm_add_edid_modes(connector, intel_dp->edid);
 		drm_edid_to_eld(connector,
 				intel_dp->edid);
-		connector->display_info.raw_edid = NULL;
 		return intel_dp->edid_mode_count;
 	}
 
@@ -2205,7 +2200,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
 		if (edid) {
 			intel_dp->has_audio = drm_detect_monitor_audio(edid);
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		}
 	}
@@ -2270,8 +2264,6 @@ intel_dp_detect_audio(struct drm_connector *connector)
 	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
 	if (edid) {
 		has_audio = drm_detect_monitor_audio(edid);
-
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -2325,9 +2317,8 @@ intel_dp_set_property(struct drm_connector *connector,
 done:
 	if (intel_dp->base.base.crtc) {
 		struct drm_crtc *crtc = intel_dp->base.base.crtc;
-		drm_crtc_helper_set_mode(crtc, &crtc->mode,
-					 crtc->x, crtc->y,
-					 crtc->fb);
+		intel_set_mode(crtc, &crtc->mode,
+			       crtc->x, crtc->y, crtc->fb);
 	}
 
 	return 0;
@@ -2361,15 +2352,13 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
-	.dpms = intel_dp_dpms,
 	.mode_fixup = intel_dp_mode_fixup,
-	.prepare = intel_dp_prepare,
 	.mode_set = intel_dp_mode_set,
-	.commit = intel_dp_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_connector_dpms,
 	.detect = intel_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_dp_set_property,
@@ -2440,7 +2429,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 }
 
 void
-intel_dp_init(struct drm_device *dev, int output_reg)
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
@@ -2455,7 +2444,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 		return;
 
 	intel_dp->output_reg = output_reg;
-	intel_dp->dpms_mode = -1;
+	intel_dp->port = port;
+	/* Preserve the current hw state. */
+	intel_dp->DP = I915_READ(intel_dp->output_reg);
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
@@ -2482,18 +2473,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
-	if (output_reg == DP_B || output_reg == PCH_DP_B)
-		intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
-	else if (output_reg == DP_C || output_reg == PCH_DP_C)
-		intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
-	else if (output_reg == DP_D || output_reg == PCH_DP_D)
-		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+	intel_encoder->cloneable = false;
 
-	if (is_edp(intel_dp)) {
-		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
-		INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-				  ironlake_panel_vdd_work);
-	}
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+			  ironlake_panel_vdd_work);
 
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
@@ -2507,29 +2490,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	drm_sysfs_connector_add(connector);
 
+	intel_encoder->enable = intel_enable_dp;
+	intel_encoder->pre_enable = intel_pre_enable_dp;
+	intel_encoder->disable = intel_disable_dp;
+	intel_encoder->post_disable = intel_post_disable_dp;
+	intel_encoder->get_hw_state = intel_dp_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 	/* Set up the DDC bus. */
-	switch (output_reg) {
-		case DP_A:
-			name = "DPDDC-A";
-			break;
-		case DP_B:
-		case PCH_DP_B:
-			dev_priv->hotplug_supported_mask |=
-				DPB_HOTPLUG_INT_STATUS;
-			name = "DPDDC-B";
-			break;
-		case DP_C:
-		case PCH_DP_C:
-			dev_priv->hotplug_supported_mask |=
-				DPC_HOTPLUG_INT_STATUS;
-			name = "DPDDC-C";
-			break;
-		case DP_D:
-		case PCH_DP_D:
-			dev_priv->hotplug_supported_mask |=
-				DPD_HOTPLUG_INT_STATUS;
-			name = "DPDDC-D";
-			break;
+	switch (port) {
+	case PORT_A:
+		name = "DPDDC-A";
+		break;
+	case PORT_B:
+		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
+		name = "DPDDC-B";
+		break;
+	case PORT_C:
+		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
+		name = "DPDDC-C";
+		break;
+	case PORT_D:
+		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
+		name = "DPDDC-D";
+		break;
+	default:
+		WARN(1, "Invalid port %c\n", port_name(port));
+		break;
 	}
 
 	/* Cache some DPCD data in the eDP case */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7db849052a98..05cc7c372fc5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_helper.h>
 
 #define _wait_for(COND, MS, W) ({ \
 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
@@ -40,7 +41,11 @@
 			ret__ = -ETIMEDOUT;				\
 			break;						\
 		}							\
-		if (W && drm_can_sleep()) msleep(W);	\
+		if (W && drm_can_sleep())  {				\
+			msleep(W);					\
+		} else {						\
+			cpu_relax();					\
+		}							\
 	}								\
 	ret__;								\
 })
@@ -90,25 +95,6 @@
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
 
-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17
-
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
 #define INTEL_DVO_CHIP_TMDS 2
@@ -151,16 +137,48 @@ struct intel_fbdev {
 
 struct intel_encoder {
 	struct drm_encoder base;
+	/*
+	 * The new crtc this encoder will be driven from. Only differs from
+	 * base->crtc while a modeset is in progress.
+	 */
+	struct intel_crtc *new_crtc;
+
 	int type;
 	bool needs_tv_clock;
+	/*
+	 * Intel hw has only one MUX where encoders could be cloned, hence a
+	 * simple flag is enough to compute the possible_clones mask.
+	 */
+	bool cloneable;
+	bool connectors_active;
 	void (*hot_plug)(struct intel_encoder *);
+	void (*pre_enable)(struct intel_encoder *);
+	void (*enable)(struct intel_encoder *);
+	void (*disable)(struct intel_encoder *);
+	void (*post_disable)(struct intel_encoder *);
+	/* Read out the current hw state of this encoder, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the
+	 * pipe it is connected to in the pipe parameter. */
+	bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
 	int crtc_mask;
-	int clone_mask;
 };
 
 struct intel_connector {
 	struct drm_connector base;
+	/*
+	 * The fixed encoder this connector is connected to.
+	 */
 	struct intel_encoder *encoder;
+
+	/*
+	 * The new encoder this connector will be driven by. Only differs from
+	 * encoder while a modeset is in progress.
+	 */
+	struct intel_encoder *new_encoder;
+
+	/* Reads out the current hw, returning true if the connector is enabled
+	 * and active (i.e. dpms ON state). */
+	bool (*get_hw_state)(struct intel_connector *);
 };
 
 struct intel_crtc {
@@ -168,11 +186,13 @@ struct intel_crtc {
 	enum pipe pipe;
 	enum plane plane;
 	u8 lut_r[256], lut_g[256], lut_b[256];
-	int dpms_mode;
-	bool active; /* is the crtc on? independent of the dpms mode */
+	/*
+	 * Whether the crtc and the connected output pipeline are active. Implies
+	 * that crtc->enabled is set, i.e. the current mode configuration has
+	 * some outputs connected to this crtc.
+	 */
+	bool active;
 	bool primary_disabled; /* is the crtc obscured by a plane? */
-	bool busy; /* is scanout buffer being updated frequently? */
-	struct timer_list idle_timer;
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
 	struct intel_unpin_work *unpin_work;
@@ -311,6 +331,37 @@ struct intel_hdmi {
 			       struct drm_display_mode *adjusted_mode);
 };
 
+#define DP_RECEIVER_CAP_SIZE		0xf
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+struct intel_dp {
+	struct intel_encoder base;
+	uint32_t output_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	enum port port;
+	uint32_t color_range;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	bool is_pch_edp;
+	uint8_t train_set[4];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+	struct delayed_work panel_vdd_work;
+	bool want_panel_vdd;
+	struct edid *edid; /* cached EDID for eDP */
+	int edid_mode_count;
+};
+
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
@@ -350,17 +401,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
 extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
 extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+extern void intel_hdmi_init(struct drm_device *dev,
+			    int sdvox_reg, enum port port);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
 			    bool is_sdvob);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_idle(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+			  enum port port);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode);
@@ -373,8 +428,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
 extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane);
 
-void intel_sanitize_pm(struct drm_device *dev);
-
 /* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
@@ -391,10 +444,27 @@ extern void intel_panel_disable_backlight(struct drm_device *dev);
 extern void intel_panel_destroy_backlight(struct drm_device *dev);
 extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
 
+struct intel_set_config {
+	struct drm_encoder **save_connector_encoders;
+	struct drm_crtc **save_encoder_crtcs;
+
+	bool fb_changed;
+	bool mode_changed;
+};
+
+extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			   int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare(struct drm_encoder *encoder);
-extern void intel_encoder_commit(struct drm_encoder *encoder);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_noop(struct drm_encoder *encoder);
 extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
+
 
 static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
 {
@@ -417,12 +487,10 @@ struct intel_load_detect_pipe {
 	bool load_detect_temp;
 	int dpms_mode;
 };
-extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
-				       struct drm_connector *connector,
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
 				       struct drm_display_mode *mode,
 				       struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
-					   struct drm_connector *connector,
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
 					   struct intel_load_detect_pipe *old);
 
 extern void intelfb_restore(void);
@@ -503,7 +571,10 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
 extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
 extern void ironlake_teardown_rc6(struct drm_device *dev);
 
-extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_enable_ddi(struct intel_encoder *encoder);
+extern void intel_disable_ddi(struct intel_encoder *encoder);
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe);
 extern void intel_ddi_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ac9f2dd5648a..15da99533e5b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -36,6 +36,7 @@
 #define SIL164_ADDR	0x38
 #define CH7xxx_ADDR	0x76
 #define TFP410_ADDR	0x38
+#define NS2501_ADDR     0x38
 
 static const struct intel_dvo_device intel_dvo_devices[] = {
 	{
@@ -73,7 +74,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
 		.slave_addr = 0x75,
 		.gpio = GMBUS_PORT_DPB,
 		.dev_ops = &ch7017_ops,
-	}
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ns2501",
+		.dvo_reg = DVOC,
+		.slave_addr = NS2501_ADDR,
+		.dev_ops = &ns2501_ops,
+	}
 };
 
 struct intel_dvo {
@@ -96,22 +104,91 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
 			    struct intel_dvo, base);
 }
 
-static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 {
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+	return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+	if (!(tmp & DVO_ENABLE))
+		return false;
+
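+	/* The DVO control register also encodes which pipe the port is on. */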
+	*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	u32 dvo_reg = intel_dvo->dev.dvo_reg;
+	u32 temp = I915_READ(dvo_reg);
+
+	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+	I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+	I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
 	u32 dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
+	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+	I915_READ(dvo_reg);
+	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct drm_crtc *crtc;
+
+	/* dvo supports only 2 dpms states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = intel_dvo->base.base.crtc;
+	if (!crtc) {
+		intel_dvo->base.connectors_active = false;
+		return;
+	}
+
 	if (mode == DRM_MODE_DPMS_ON) {
-		I915_WRITE(dvo_reg, temp | DVO_ENABLE);
-		I915_READ(dvo_reg);
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
+		intel_dvo->base.connectors_active = true;
+
+		intel_crtc_update_dpms(crtc);
+
+		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 	} else {
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
-		I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
-		I915_READ(dvo_reg);
+		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+		intel_dvo->base.connectors_active = false;
+
+		intel_crtc_update_dpms(crtc);
 	}
+
+	intel_modeset_check_state(connector->dev);
 }
 
 static int intel_dvo_mode_valid(struct drm_connector *connector,
@@ -266,15 +343,13 @@ static void intel_dvo_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
-	.dpms = intel_dvo_dpms,
 	.mode_fixup = intel_dvo_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_dvo_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_dvo_dpms,
 	.detect = intel_dvo_detect,
 	.destroy = intel_dvo_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -363,6 +438,11 @@ void intel_dvo_init(struct drm_device *dev)
 	drm_encoder_init(dev, &intel_encoder->base,
 			 &intel_dvo_enc_funcs, encoder_type);
 
+	intel_encoder->disable = intel_disable_dvo;
+	intel_encoder->enable = intel_enable_dvo;
+	intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+	intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
 	/* Now, try to find a controller */
 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
 		struct drm_connector *connector = &intel_connector->base;
@@ -395,17 +475,14 @@ void intel_dvo_init(struct drm_device *dev)
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 		switch (dvo->type) {
 		case INTEL_DVO_CHIP_TMDS:
-			intel_encoder->clone_mask =
-				(1 << INTEL_DVO_TMDS_CLONE_BIT) |
-				(1 << INTEL_ANALOG_CLONE_BIT);
+			intel_encoder->cloneable = true;
 			drm_connector_init(dev, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_DVII);
 			encoder_type = DRM_MODE_ENCODER_TMDS;
 			break;
 		case INTEL_DVO_CHIP_LVDS:
-			intel_encoder->clone_mask =
-				(1 << INTEL_DVO_LVDS_CLONE_BIT);
+			intel_encoder->cloneable = false;
 			drm_connector_init(dev, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_LVDS);
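
The get_hw_state hook added for DVO above follows the pattern used throughout this series: read the port control register, report the output as off when the enable bit is clear, and otherwise decode which pipe drives it from the pipe-select bit. The stand-alone sketch below models only that decode; the MODEL_* names and bit positions are assumptions for illustration, not the i915 register layout.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit layout only -- not the real DVO register definition. */
    #define MODEL_DVO_ENABLE    (1u << 31)
    #define MODEL_PIPE_B_SELECT (1u << 30)

    /* Mirror of the get_hw_state pattern: report whether the port is enabled
     * and, if so, which pipe feeds it. */
    static bool model_dvo_get_hw_state(uint32_t reg, int *pipe)
    {
            if (!(reg & MODEL_DVO_ENABLE))
                    return false;

            *pipe = (reg & MODEL_PIPE_B_SELECT) ? 1 : 0;
            return true;
    }

    int main(void)
    {
            int pipe;
            uint32_t reg = MODEL_DVO_ENABLE | MODEL_PIPE_B_SELECT;

            if (model_dvo_get_hw_state(reg, &pipe))
                    printf("DVO enabled on pipe %c\n", 'A' + pipe);
            return 0;
    }
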
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 025be7dd2a27..9ba0aaed7ee8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -150,6 +150,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 		I915_WRITE(VIDEO_DIP_DATA, *data);
 		data++;
 	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(VIDEO_DIP_DATA, 0);
 	mmiowb();
 
 	val |= g4x_infoframe_enable(frame);
@@ -185,6 +188,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
 		data++;
 	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 	mmiowb();
 
 	val |= g4x_infoframe_enable(frame);
@@ -223,6 +229,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
 		data++;
 	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 	mmiowb();
 
 	val |= g4x_infoframe_enable(frame);
@@ -258,6 +267,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
 		data++;
 	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 	mmiowb();
 
 	val |= g4x_infoframe_enable(frame);
@@ -291,6 +303,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 		I915_WRITE(data_reg + i, *data);
 		data++;
 	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(data_reg + i, 0);
 	mmiowb();
 
 	val |= hsw_infoframe_enable(frame);
@@ -376,6 +391,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
 		port = VIDEO_DIP_PORT_C;
 		break;
 	default:
+		BUG();
 		return;
 	}
 
@@ -434,6 +450,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
 		port = VIDEO_DIP_PORT_D;
 		break;
 	default:
+		BUG();
 		return;
 	}
 
@@ -600,15 +617,36 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 	intel_hdmi->set_infoframes(encoder, adjusted_mode);
 }
 
-static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_hdmi->sdvox_reg);
+
+	if (!(tmp & SDVO_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
 	u32 enable_bits = SDVO_ENABLE;
 
-	if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
+	if (intel_hdmi->has_audio)
 		enable_bits |= SDVO_AUDIO_ENABLE;
 
 	temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -616,31 +654,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 	/* HW workaround for IBX, we need to move the port to transcoder A
 	 * before disabling it. */
 	if (HAS_PCH_IBX(dev)) {
-		struct drm_crtc *crtc = encoder->crtc;
+		struct drm_crtc *crtc = encoder->base.crtc;
 		int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
 
-		if (mode != DRM_MODE_DPMS_ON) {
-			if (temp & SDVO_PIPE_B_SELECT) {
-				temp &= ~SDVO_PIPE_B_SELECT;
-				I915_WRITE(intel_hdmi->sdvox_reg, temp);
-				POSTING_READ(intel_hdmi->sdvox_reg);
-
-				/* Again we need to write this twice. */
-				I915_WRITE(intel_hdmi->sdvox_reg, temp);
-				POSTING_READ(intel_hdmi->sdvox_reg);
-
-				/* Transcoder selection bits only update
-				 * effectively on vblank. */
-				if (crtc)
-					intel_wait_for_vblank(dev, pipe);
-				else
-					msleep(50);
-			}
-		} else {
-			/* Restore the transcoder select bit. */
-			if (pipe == PIPE_B)
-				enable_bits |= SDVO_PIPE_B_SELECT;
-		}
+		/* Restore the transcoder select bit. */
+		if (pipe == PIPE_B)
+			enable_bits |= SDVO_PIPE_B_SELECT;
 	}
 
 	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
@@ -651,12 +670,64 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 		POSTING_READ(intel_hdmi->sdvox_reg);
 	}
 
-	if (mode != DRM_MODE_DPMS_ON) {
-		temp &= ~enable_bits;
-	} else {
-		temp |= enable_bits;
+	temp |= enable_bits;
+
+	I915_WRITE(intel_hdmi->sdvox_reg, temp);
+	POSTING_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround: the register needs to be written twice, since the
+	 * first write may otherwise be masked.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(intel_hdmi->sdvox_reg, temp);
+		POSTING_READ(intel_hdmi->sdvox_reg);
+	}
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
+	u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+	temp = I915_READ(intel_hdmi->sdvox_reg);
+
+	/* HW workaround for IBX, we need to move the port to transcoder A
+	 * before disabling it. */
+	if (HAS_PCH_IBX(dev)) {
+		struct drm_crtc *crtc = encoder->base.crtc;
+		int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+		if (temp & SDVO_PIPE_B_SELECT) {
+			temp &= ~SDVO_PIPE_B_SELECT;
+			I915_WRITE(intel_hdmi->sdvox_reg, temp);
+			POSTING_READ(intel_hdmi->sdvox_reg);
+
+			/* Again we need to write this twice. */
+			I915_WRITE(intel_hdmi->sdvox_reg, temp);
+			POSTING_READ(intel_hdmi->sdvox_reg);
+
+			/* Transcoder selection bits only update
+			 * effectively on vblank. */
+			if (crtc)
+				intel_wait_for_vblank(dev, pipe);
+			else
+				msleep(50);
+		}
+	}
+
+	/* HW workaround: toggle the enable bit off and on for 12bpc; we do
+	 * this unconditionally as it has proven more stable in testing.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
+		POSTING_READ(intel_hdmi->sdvox_reg);
 	}
 
+	temp &= ~enable_bits;
+
 	I915_WRITE(intel_hdmi->sdvox_reg, temp);
 	POSTING_READ(intel_hdmi->sdvox_reg);
 
@@ -736,7 +807,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 						drm_detect_hdmi_monitor(edid);
 			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
 		}
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -777,8 +847,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
 	if (edid) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL)
 			has_audio = drm_detect_monitor_audio(edid);
-
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -832,9 +900,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
 done:
 	if (intel_hdmi->base.base.crtc) {
 		struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
-		drm_crtc_helper_set_mode(crtc, &crtc->mode,
-					 crtc->x, crtc->y,
-					 crtc->fb);
+		intel_set_mode(crtc, &crtc->mode,
+			       crtc->x, crtc->y, crtc->fb);
 	}
 
 	return 0;
@@ -848,23 +915,19 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
-	.dpms = intel_ddi_dpms,
 	.mode_fixup = intel_hdmi_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_ddi_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
-	.dpms = intel_hdmi_dpms,
 	.mode_fixup = intel_hdmi_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_hdmi_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_connector_dpms,
 	.detect = intel_hdmi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_hdmi_set_property,
@@ -888,7 +951,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
 	intel_attach_broadcast_rgb_property(connector);
 }
 
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
@@ -922,48 +985,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 	connector->doublescan_allowed = 0;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-	/* Set up the DDC bus. */
-	if (sdvox_reg == SDVOB) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == SDVOC) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMIB) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMIC) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == HDMID) {
-		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
-		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+	intel_encoder->cloneable = false;
+
+	intel_hdmi->ddi_port = port;
+	switch (port) {
+	case PORT_B:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
-		intel_hdmi->ddi_port = PORT_B;
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+		break;
+	case PORT_C:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
-		intel_hdmi->ddi_port = PORT_C;
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
-	} else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
-		DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
-		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+		break;
+	case PORT_D:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
-		intel_hdmi->ddi_port = PORT_D;
 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
-	} else {
-		/* If we got an unknown sdvox_reg, things are pretty much broken
-		 * in a way that we should let the kernel know about it */
+		break;
+	case PORT_A:
+		/* Internal port only for eDP. */
+	default:
 		BUG();
 	}
 
@@ -986,10 +1026,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 		intel_hdmi->set_infoframes = cpt_set_infoframes;
 	}
 
-	if (IS_HASWELL(dev))
-		drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
-	else
-		drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+	if (IS_HASWELL(dev)) {
+		intel_encoder->enable = intel_enable_ddi;
+		intel_encoder->disable = intel_disable_ddi;
+		intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+		drm_encoder_helper_add(&intel_encoder->base,
+				       &intel_hdmi_helper_funcs_hsw);
+	} else {
+		intel_encoder->enable = intel_enable_hdmi;
+		intel_encoder->disable = intel_disable_hdmi;
+		intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+		drm_encoder_helper_add(&intel_encoder->base,
+				       &intel_hdmi_helper_funcs);
+	}
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 
 	intel_hdmi_add_properties(intel_hdmi, connector);
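
The infoframe hunks above all end with the same fix: after the frame payload is written, the remaining DIP data dwords are zeroed so the hardware computes its ECC over a fully initialised buffer instead of stale leftovers. A stand-alone model of that "write payload, then zero-fill in 4-byte steps" pattern is sketched below; MODEL_DIP_DATA_SIZE is an arbitrary example size rather than the driver's VIDEO_DIP_DATA_SIZE, and the payload length is assumed to be dword-aligned.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MODEL_DIP_DATA_SIZE 36  /* example size only */

    /* Copy the frame payload, then zero every remaining dword so the ECC is
     * always computed over the same, fully initialised buffer. */
    static void model_write_infoframe(uint8_t *dip, const uint8_t *frame,
                                      unsigned int len)
    {
            unsigned int i;

            for (i = 0; i < len; i += 4)
                    memcpy(dip + i, frame + i, 4);
            for (; i < MODEL_DIP_DATA_SIZE; i += 4)
                    memset(dip + i, 0, 4);
    }

    int main(void)
    {
            uint8_t dip[MODEL_DIP_DATA_SIZE];
            const uint8_t frame[12] = { 0x82, 0x02, 0x0d };

            model_write_infoframe(dip, frame, sizeof(frame));
            printf("trailing byte after padding: %u\n",
                   dip[MODEL_DIP_DATA_SIZE - 1]);
            return 0;
    }
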
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8552be9f5db1..e3166df55daa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -64,13 +64,40 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
 			    struct intel_lvds, base);
 }
 
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 lvds_reg, tmp;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		lvds_reg = PCH_LVDS;
+	} else {
+		lvds_reg = LVDS;
+	}
+
+	tmp = I915_READ(lvds_reg);
+
+	if (!(tmp & LVDS_PORT_EN))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
 /**
  * Sets the power state for the panel.
  */
-static void intel_lvds_enable(struct intel_lvds *intel_lvds)
+static void intel_enable_lvds(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = intel_lvds->base.base.dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc);
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, lvds_reg, stat_reg;
 
@@ -110,9 +137,10 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
 	intel_panel_enable_backlight(dev, intel_crtc->pipe);
 }
 
-static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+static void intel_disable_lvds(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = intel_lvds->base.base.dev;
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, lvds_reg, stat_reg;
 
@@ -141,18 +169,6 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
 	POSTING_READ(lvds_reg);
 }
 
-static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
-	if (mode == DRM_MODE_DPMS_ON)
-		intel_lvds_enable(intel_lvds);
-	else
-		intel_lvds_disable(intel_lvds);
-
-	/* XXX: We never power down the LVDS pairs. */
-}
-
 static int intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
@@ -233,9 +249,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-	struct intel_encoder *tmp_encoder;
+	struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
 	int pipe;
 
@@ -245,14 +260,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		return false;
 	}
 
-	/* Should never happen!! */
-	for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) {
-		if (&tmp_encoder->base != encoder) {
-			DRM_ERROR("Can't enable LVDS and another "
-			       "encoder on the same pipe\n");
-			return false;
-		}
-	}
+	if (intel_encoder_check_is_cloned(&intel_lvds->base))
+		return false;
 
 	/*
 	 * We have timings from the BIOS for the panel, put them in
@@ -404,23 +413,6 @@ out:
 	return true;
 }
 
-static void intel_lvds_prepare(struct drm_encoder *encoder)
-{
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
-	intel_lvds_disable(intel_lvds);
-}
-
-static void intel_lvds_commit(struct drm_encoder *encoder)
-{
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
-	/* Always do a full power on as we do not know what state
-	 * we were left in.
-	 */
-	intel_lvds_enable(intel_lvds);
-}
-
 static void intel_lvds_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
@@ -534,7 +526,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	dev_priv->modeset_on_lid = 0;
 
 	mutex_lock(&dev->mode_config.mutex);
-	drm_helper_resume_force_mode(dev);
+	intel_modeset_check_state(dev);
 	mutex_unlock(&dev->mode_config.mutex);
 
 	return NOTIFY_OK;
@@ -586,8 +578,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 			 * If the CRTC is enabled, the display will be changed
 			 * according to the new panel fitting mode.
 			 */
-			drm_crtc_helper_set_mode(crtc, &crtc->mode,
-				crtc->x, crtc->y, crtc->fb);
+			intel_set_mode(crtc, &crtc->mode,
+				       crtc->x, crtc->y, crtc->fb);
 		}
 	}
 
@@ -595,11 +587,9 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 }
 
 static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
-	.dpms = intel_lvds_dpms,
 	.mode_fixup = intel_lvds_mode_fixup,
-	.prepare = intel_lvds_prepare,
 	.mode_set = intel_lvds_mode_set,
-	.commit = intel_lvds_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -609,7 +599,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_connector_dpms,
 	.detect = intel_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
@@ -971,10 +961,15 @@ bool intel_lvds_init(struct drm_device *dev)
 	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
 			 DRM_MODE_ENCODER_LVDS);
 
+	intel_encoder->enable = intel_enable_lvds;
+	intel_encoder->disable = intel_disable_lvds;
+	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_LVDS;
 
-	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+	intel_encoder->cloneable = false;
 	if (HAS_PCH_SPLIT(dev))
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	else if (IS_GEN4(dev))
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cc71fd9aaed5..cabd84bf66eb 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
 	drm_edid_to_eld(connector, edid);
-	connector->display_info.raw_edid = NULL;
 	kfree(edid);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5cc624eb6133..5530413213d8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@ blind_set:
 	goto end;
 }
 
+static void intel_setup_cadls(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int i = 0;
+	u32 disp_id;
+
+	/* Initialize the CADL field by duplicating the DIDL values.
+	 * Technically this is not always correct, since display outputs may
+	 * exist without being active. The initialization is needed for some
+	 * Clevo laptops that check this field before processing the brightness
+	 * and display switching hotkeys. Just like DIDL, CADL is NULL-terminated
+	 * if there are fewer than eight devices. */
+	do {
+		disp_id = ioread32(&opregion->acpi->didl[i]);
+		iowrite32(disp_id, &opregion->acpi->cadl[i]);
+	} while (++i < 8 && disp_id != 0);
+}
+
 void intel_opregion_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
 		return;
 
 	if (opregion->acpi) {
-		if (drm_core_check_feature(dev, DRIVER_MODESET))
+		if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 			intel_didl_outputs(dev);
+			intel_setup_cadls(dev);
+		}
 
 		/* Notify BIOS we are ready to handle ACPI video ext notifs.
 		 * Right now, all the events are handled by the ACPI video module.
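
The intel_setup_cadls() helper added above simply mirrors the DIDL list into CADL: entries are copied one by one until the first zero entry or until eight entries have been copied, whichever comes first. The sketch below reproduces that loop over plain arrays instead of the memory-mapped opregion; it is a model of the copy logic only.

    #include <stdint.h>
    #include <stdio.h>

    /* Duplicate up to eight DIDL entries into CADL, stopping once the zero
     * terminator has been copied -- same loop shape as intel_setup_cadls(). */
    static void model_setup_cadls(const uint32_t didl[8], uint32_t cadl[8])
    {
            int i = 0;
            uint32_t disp_id;

            do {
                    disp_id = didl[i];
                    cadl[i] = disp_id;
            } while (++i < 8 && disp_id != 0);
    }

    int main(void)
    {
            const uint32_t didl[8] = { 0x0400, 0x0301, 0 };
            uint32_t cadl[8] = { 0 };

            model_setup_cadls(didl, cadl);
            printf("cadl[1] = 0x%04x\n", (unsigned int)cadl[1]);
            return 0;
    }
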
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 10510221d763..ebff850a9ab6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -234,54 +234,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	return 0;
 }
 
-/* Workaround for i830 bug where pipe a must be enable to change control regs */
-static int
-i830_activate_pipe_a(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc;
-	struct drm_crtc_helper_funcs *crtc_funcs;
-	struct drm_display_mode vesa_640x480 = {
-		DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
-			 752, 800, 0, 480, 489, 492, 525, 0,
-			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
-	}, *mode;
-
-	crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
-	if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
-		return 0;
-
-	/* most i8xx have pipe a forced on, so don't trust dpms mode */
-	if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
-		return 0;
-
-	crtc_funcs = crtc->base.helper_private;
-	if (crtc_funcs->dpms == NULL)
-		return 0;
-
-	DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
-
-	mode = drm_mode_duplicate(dev, &vesa_640x480);
-
-	if (!drm_crtc_helper_set_mode(&crtc->base, mode,
-				       crtc->base.x, crtc->base.y,
-				       crtc->base.fb))
-		return 0;
-
-	crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
-	return 1;
-}
-
-static void
-i830_deactivate_pipe_a(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
 /* overlay needs to be disable in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
@@ -289,17 +241,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *request;
-	int pipe_a_quirk = 0;
 	int ret;
 
 	BUG_ON(overlay->active);
 	overlay->active = 1;
 
-	if (IS_I830(dev)) {
-		pipe_a_quirk = i830_activate_pipe_a(dev);
-		if (pipe_a_quirk < 0)
-			return pipe_a_quirk;
-	}
+	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
 	if (request == NULL) {
@@ -321,9 +268,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
-	if (pipe_a_quirk)
-		i830_deactivate_pipe_a(dev);
-
 	return ret;
 }
 
@@ -1438,7 +1382,7 @@ void intel_setup_overlay(struct drm_device *dev)
 		}
 		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ba8a27b1757a..d69f8f49beb5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,8 @@
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
 
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
  * during in-memory transfers and, therefore, reduce the power packet.
@@ -593,7 +595,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 		break;
 	}
 
-	dev_priv->r_t = dev_priv->mem_freq;
+	dev_priv->ips.r_t = dev_priv->mem_freq;
 
 	switch (csipll & 0x3ff) {
 	case 0x00c:
@@ -625,11 +627,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 	}
 
 	if (dev_priv->fsb_freq == 3200) {
-		dev_priv->c_m = 0;
+		dev_priv->ips.c_m = 0;
 	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
-		dev_priv->c_m = 1;
+		dev_priv->ips.c_m = 1;
 	} else {
-		dev_priv->c_m = 2;
+		dev_priv->ips.c_m = 2;
 	}
 }
 
@@ -2138,7 +2140,7 @@ intel_alloc_context_page(struct drm_device *dev)
 		return NULL;
 	}
 
-	ret = i915_gem_object_pin(ctx, 4096, true);
+	ret = i915_gem_object_pin(ctx, 4096, true, false);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
@@ -2160,11 +2162,22 @@ err_unref:
 	return NULL;
 }
 
+/**
+ * Lock protecting IPS related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
 bool ironlake_set_drps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 rgvswctl;
 
+	assert_spin_locked(&mchdev_lock);
+
 	rgvswctl = I915_READ16(MEMSWCTL);
 	if (rgvswctl & MEMCTL_CMD_STS) {
 		DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2188,6 +2201,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
 	u8 fmax, fmin, fstart, vstart;
 
+	spin_lock_irq(&mchdev_lock);
+
 	/* Enable temp reporting */
 	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
 	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2211,12 +2226,12 @@ static void ironlake_enable_drps(struct drm_device *dev)
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fmax; /* IPS callback will increase this */
-	dev_priv->fstart = fstart;
+	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+	dev_priv->ips.fstart = fstart;
 
-	dev_priv->max_delay = fstart;
-	dev_priv->min_delay = fmin;
-	dev_priv->cur_delay = fstart;
+	dev_priv->ips.max_delay = fstart;
+	dev_priv->ips.min_delay = fmin;
+	dev_priv->ips.cur_delay = fstart;
 
 	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
 			 fmax, fmin, fstart);
@@ -2233,23 +2248,29 @@ static void ironlake_enable_drps(struct drm_device *dev)
 	rgvmodectl |= MEMMODE_SWMODE_EN;
 	I915_WRITE(MEMMODECTL, rgvmodectl);
 
-	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
 		DRM_ERROR("stuck trying to change perf mode\n");
-	msleep(1);
+	mdelay(1);
 
 	ironlake_set_drps(dev, fstart);
 
-	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
 		I915_READ(0x112e0);
-	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
-	dev_priv->last_count2 = I915_READ(0x112f4);
-	getrawmonotonic(&dev_priv->last_time2);
+	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+	dev_priv->ips.last_count2 = I915_READ(0x112f4);
+	getrawmonotonic(&dev_priv->ips.last_time2);
+
+	spin_unlock_irq(&mchdev_lock);
 }
 
 static void ironlake_disable_drps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 rgvswctl = I915_READ16(MEMSWCTL);
+	u16 rgvswctl;
+
+	spin_lock_irq(&mchdev_lock);
+
+	rgvswctl = I915_READ16(MEMSWCTL);
 
 	/* Ack interrupts, disable EFC interrupt */
 	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -2259,31 +2280,54 @@ static void ironlake_disable_drps(struct drm_device *dev)
 	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
 	/* Go back to the starting frequency */
-	ironlake_set_drps(dev, dev_priv->fstart);
-	msleep(1);
+	ironlake_set_drps(dev, dev_priv->ips.fstart);
+	mdelay(1);
 	rgvswctl |= MEMCTL_CMD_STS;
 	I915_WRITE(MEMSWCTL, rgvswctl);
-	msleep(1);
+	mdelay(1);
 
+	spin_unlock_irq(&mchdev_lock);
 }
 
-void gen6_set_rps(struct drm_device *dev, u8 val)
+/* There's a funny hw issue where reads of GEN6_RP_INTERRUPT_LIMITS return
+ * all 0. Hence we always need to compute the desired value ourselves,
+ * instead of doing a read-modify-write cycle (which might clear all limits
+ * and leave the gpu stuck at whatever frequency it is currently running at).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 limits;
 
 	limits = 0;
-	if (val >= dev_priv->max_delay)
-		val = dev_priv->max_delay;
-	else
-		limits |= dev_priv->max_delay << 24;
 
-	if (val <= dev_priv->min_delay)
-		val = dev_priv->min_delay;
-	else
-		limits |= dev_priv->min_delay << 16;
+	if (*val >= dev_priv->rps.max_delay)
+		*val = dev_priv->rps.max_delay;
+	limits |= dev_priv->rps.max_delay << 24;
+
+	/* Only set the down limit when we've reached the lowest level to avoid
+	 * getting more interrupts, otherwise leave this clear. This prevents a
+	 * race in the hw when coming out of rc6: there is a tiny window where
+	 * the hw runs at the minimal clock before selecting the desired
+	 * frequency; if the down threshold expires in that window we will not
+	 * receive a down interrupt. */
+	if (*val <= dev_priv->rps.min_delay) {
+		*val = dev_priv->rps.min_delay;
+		limits |= dev_priv->rps.min_delay << 16;
+	}
 
-	if (val == dev_priv->cur_delay)
+	return limits;
+}
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 limits = gen6_rps_limits(dev_priv, &val);
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(val > dev_priv->rps.max_delay);
+	WARN_ON(val < dev_priv->rps.min_delay);
+
+	if (val == dev_priv->rps.cur_delay)
 		return;
 
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -2296,7 +2340,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	 */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 
-	dev_priv->cur_delay = val;
+	POSTING_READ(GEN6_RPNSWREQ);
+
+	dev_priv->rps.cur_delay = val;
+
+	trace_intel_gpu_freq_change(val * 50);
 }
 
 static void gen6_disable_rps(struct drm_device *dev)
@@ -2312,40 +2360,40 @@ static void gen6_disable_rps(struct drm_device *dev)
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	dev_priv->pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_lock_irq(&dev_priv->rps.lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 }
 
 int intel_enable_rc6(const struct drm_device *dev)
 {
-	/*
-	 * Respect the kernel parameter if it is set
-	 */
+	/* Respect the kernel parameter if it is set */
 	if (i915_enable_rc6 >= 0)
 		return i915_enable_rc6;
 
-	/*
-	 * Disable RC6 on Ironlake
-	 */
-	if (INTEL_INFO(dev)->gen == 5)
-		return 0;
+	if (INTEL_INFO(dev)->gen == 5) {
+#ifdef CONFIG_INTEL_IOMMU
+		/* Disable rc6 on ilk if VT-d is on. */
+		if (intel_iommu_gfx_mapped)
+			return false;
+#endif
+		DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
+		return INTEL_RC6_ENABLE;
+	}
 
-	/* On Haswell, only RC6 is available. So let's enable it by default to
-	 * provide better testing and coverage since the beginning.
-	 */
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev)) {
+		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
 		return INTEL_RC6_ENABLE;
+	}
 
-	/*
-	 * Disable rc6 on Sandybridge
-	 */
+	/* snb/ivb have more than one rc6 state. */
 	if (INTEL_INFO(dev)->gen == 6) {
 		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
 		return INTEL_RC6_ENABLE;
 	}
+
 	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
@@ -2383,9 +2431,9 @@ static void gen6_enable_rps(struct drm_device *dev)
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
 	/* In units of 100MHz */
-	dev_priv->max_delay = rp_state_cap & 0xff;
-	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
-	dev_priv->cur_delay = 0;
+	dev_priv->rps.max_delay = rp_state_cap & 0xff;
+	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2438,8 +2486,8 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->max_delay << 24 |
-		   dev_priv->min_delay << 16);
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
 
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -2477,7 +2525,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		dev_priv->max_delay = pcu_mbox & 0xff;
+		dev_priv->rps.max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
@@ -2485,10 +2533,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
-	spin_lock_irq(&dev_priv->rps_lock);
-	WARN_ON(dev_priv->pm_iir != 0);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
@@ -2520,9 +2568,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
 	 * to use for memory access.  We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
 	     gpu_freq--) {
-		int diff = dev_priv->max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_delay - gpu_freq;
 
 		/*
 		 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2686,14 +2734,16 @@ static const struct cparams {
 	{ 0, 800, 231, 23784 },
 };
 
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
 {
 	u64 total_count, diff, ret;
 	u32 count1, count2, count3, m = 0, c = 0;
 	unsigned long now = jiffies_to_msecs(jiffies), diff1;
 	int i;
 
-	diff1 = now - dev_priv->last_time1;
+	assert_spin_locked(&mchdev_lock);
+
+	diff1 = now - dev_priv->ips.last_time1;
 
 	/* Prevent division-by-zero if we are asking too fast.
 	 * Also, we don't get interesting results if we are polling
@@ -2701,7 +2751,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 	 * in such cases.
 	 */
 	if (diff1 <= 10)
-		return dev_priv->chipset_power;
+		return dev_priv->ips.chipset_power;
 
 	count1 = I915_READ(DMIEC);
 	count2 = I915_READ(DDREC);
@@ -2710,16 +2760,16 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 	total_count = count1 + count2 + count3;
 
 	/* FIXME: handle per-counter overflow */
-	if (total_count < dev_priv->last_count1) {
-		diff = ~0UL - dev_priv->last_count1;
+	if (total_count < dev_priv->ips.last_count1) {
+		diff = ~0UL - dev_priv->ips.last_count1;
 		diff += total_count;
 	} else {
-		diff = total_count - dev_priv->last_count1;
+		diff = total_count - dev_priv->ips.last_count1;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
-		if (cparams[i].i == dev_priv->c_m &&
-		    cparams[i].t == dev_priv->r_t) {
+		if (cparams[i].i == dev_priv->ips.c_m &&
+		    cparams[i].t == dev_priv->ips.r_t) {
 			m = cparams[i].m;
 			c = cparams[i].c;
 			break;
@@ -2730,14 +2780,30 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 	ret = ((m * diff) + c);
 	ret = div_u64(ret, 10);
 
-	dev_priv->last_count1 = total_count;
-	dev_priv->last_time1 = now;
+	dev_priv->ips.last_count1 = total_count;
+	dev_priv->ips.last_time1 = now;
 
-	dev_priv->chipset_power = ret;
+	dev_priv->ips.chipset_power = ret;
 
 	return ret;
 }
 
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long val;
+
+	if (dev_priv->info->gen != 5)
+		return 0;
+
+	spin_lock_irq(&mchdev_lock);
+
+	val = __i915_chipset_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+
+	return val;
+}
+
 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
 {
 	unsigned long m, x, b;
@@ -2894,18 +2960,17 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 		return v_table[pxvid].vd;
 }
 
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
 	struct timespec now, diff1;
 	u64 diff;
 	unsigned long diffms;
 	u32 count;
 
-	if (dev_priv->info->gen != 5)
-		return;
+	assert_spin_locked(&mchdev_lock);
 
 	getrawmonotonic(&now);
-	diff1 = timespec_sub(now, dev_priv->last_time2);
+	diff1 = timespec_sub(now, dev_priv->ips.last_time2);
 
 	/* Don't divide by 0 */
 	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
@@ -2914,28 +2979,42 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
 	count = I915_READ(GFXEC);
 
-	if (count < dev_priv->last_count2) {
-		diff = ~0UL - dev_priv->last_count2;
+	if (count < dev_priv->ips.last_count2) {
+		diff = ~0UL - dev_priv->ips.last_count2;
 		diff += count;
 	} else {
-		diff = count - dev_priv->last_count2;
+		diff = count - dev_priv->ips.last_count2;
 	}
 
-	dev_priv->last_count2 = count;
-	dev_priv->last_time2 = now;
+	dev_priv->ips.last_count2 = count;
+	dev_priv->ips.last_time2 = now;
 
 	/* More magic constants... */
 	diff = diff * 1181;
 	diff = div_u64(diff, diffms * 10);
-	dev_priv->gfx_power = diff;
+	dev_priv->ips.gfx_power = diff;
 }
 
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->info->gen != 5)
+		return;
+
+	spin_lock_irq(&mchdev_lock);
+
+	__i915_update_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 {
 	unsigned long t, corr, state1, corr2, state2;
 	u32 pxvid, ext_v;
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	assert_spin_locked(&mchdev_lock);
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
 
@@ -2955,27 +3034,31 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 
 	corr = corr * ((150142 * state1) / 10000 - 78642);
 	corr /= 100000;
-	corr2 = (corr * dev_priv->corr);
+	corr2 = (corr * dev_priv->ips.corr);
 
 	state2 = (corr2 * state1) / 10000;
 	state2 /= 100; /* convert to mW */
 
-	i915_update_gfx_val(dev_priv);
+	__i915_update_gfx_val(dev_priv);
 
-	return dev_priv->gfx_power + state2;
+	return dev_priv->ips.gfx_power + state2;
 }
 
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- *   - i915_mch_dev
- *   - dev_priv->max_delay
- *   - dev_priv->min_delay
- *   - dev_priv->fmax
- *   - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long val;
+
+	if (dev_priv->info->gen != 5)
+		return 0;
+
+	spin_lock_irq(&mchdev_lock);
+
+	val = __i915_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+
+	return val;
+}
 
 /**
  * i915_read_mch_val - return value for IPS use
@@ -2988,18 +3071,18 @@ unsigned long i915_read_mch_val(void)
 	struct drm_i915_private *dev_priv;
 	unsigned long chipset_val, graphics_val, ret = 0;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	chipset_val = i915_chipset_val(dev_priv);
-	graphics_val = i915_gfx_val(dev_priv);
+	chipset_val = __i915_chipset_val(dev_priv);
+	graphics_val = __i915_gfx_val(dev_priv);
 
 	ret = chipset_val + graphics_val;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3015,18 +3098,18 @@ bool i915_gpu_raise(void)
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
 	}
 	dev_priv = i915_mch_dev;
 
-	if (dev_priv->max_delay > dev_priv->fmax)
-		dev_priv->max_delay--;
+	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+		dev_priv->ips.max_delay--;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3043,18 +3126,18 @@ bool i915_gpu_lower(void)
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
 	}
 	dev_priv = i915_mch_dev;
 
-	if (dev_priv->max_delay < dev_priv->min_delay)
-		dev_priv->max_delay++;
+	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+		dev_priv->ips.max_delay++;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3068,17 +3151,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
+	struct intel_ring_buffer *ring;
 	bool ret = false;
+	int i;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	ret = dev_priv->busy;
+	for_each_ring(ring, dev_priv, i)
+		ret |= !list_empty(&ring->request_list);
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3095,20 +3181,20 @@ bool i915_gpu_turbo_disable(void)
 	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
 	}
 	dev_priv = i915_mch_dev;
 
-	dev_priv->max_delay = dev_priv->fstart;
+	dev_priv->ips.max_delay = dev_priv->ips.fstart;
 
-	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
 		ret = false;
 
 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	return ret;
 }
@@ -3136,19 +3222,20 @@ ips_ping_for_i915_load(void)
 
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
 {
-	spin_lock(&mchdev_lock);
+	/* We only register the i915 ips part with intel-ips once everything is
+	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
+	spin_lock_irq(&mchdev_lock);
 	i915_mch_dev = dev_priv;
-	dev_priv->mchdev_lock = &mchdev_lock;
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 
 	ips_ping_for_i915_load();
 }
 
 void intel_gpu_ips_teardown(void)
 {
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	i915_mch_dev = NULL;
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 }
 static void intel_init_emon(struct drm_device *dev)
 {
@@ -3218,7 +3305,7 @@ static void intel_init_emon(struct drm_device *dev)
 
 	lcfuse = I915_READ(LCFUSE02);
 
-	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
 void intel_disable_gt_powersave(struct drm_device *dev)
@@ -3731,42 +3818,6 @@ void intel_init_clock_gating(struct drm_device *dev)
 		dev_priv->display.init_pch_clock_gating(dev);
 }
 
-static void gen6_sanitize_pm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits, delay, old;
-
-	gen6_gt_force_wake_get(dev_priv);
-
-	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
-	/* Make sure we continue to get interrupts
-	 * until we hit the minimum or maximum frequencies.
-	 */
-	limits &= ~(0x3f << 16 | 0x3f << 24);
-	delay = dev_priv->cur_delay;
-	if (delay < dev_priv->max_delay)
-		limits |= (dev_priv->max_delay & 0x3f) << 24;
-	if (delay > dev_priv->min_delay)
-		limits |= (dev_priv->min_delay & 0x3f) << 16;
-
-	if (old != limits) {
-		/* Note that the known failure case is to read back 0. */
-		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
-				 "expected %08x, was %08x\n", limits, old);
-		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
-	}
-
-	gen6_gt_force_wake_put(dev_priv);
-}
-
-void intel_sanitize_pm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->display.sanitize_pm)
-		dev_priv->display.sanitize_pm(dev);
-}
-
 /* Starting with Haswell, we have different power wells for
  * different parts of the GPU. This attempts to enable them all.
  */
@@ -3852,7 +3903,6 @@ void intel_init_pm(struct drm_device *dev)
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else if (IS_IVYBRIDGE(dev)) {
 			/* FIXME: detect B0+ stepping and use auto training */
 			if (SNB_READ_WM0_LATENCY()) {
@@ -3864,7 +3914,6 @@ void intel_init_pm(struct drm_device *dev)
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else if (IS_HASWELL(dev)) {
 			if (SNB_READ_WM0_LATENCY()) {
 				dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3876,7 +3925,6 @@ void intel_init_pm(struct drm_device *dev)
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
-			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -3955,14 +4003,16 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 	else
 		forcewake_ack = FORCEWAKE_ACK;
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
 	I915_WRITE_NOTRACE(FORCEWAKE, 1);
-	POSTING_READ(FORCEWAKE);
+	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
@@ -3976,14 +4026,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 	else
 		forcewake_ack = FORCEWAKE_MT_ACK;
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
-	POSTING_READ(FORCEWAKE_MT);
+	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
@@ -4016,14 +4068,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
-	POSTING_READ(FORCEWAKE);
+	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
-	POSTING_READ(FORCEWAKE_MT);
+	/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
@@ -4062,24 +4114,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 {
-	/* Already awake? */
-	if ((I915_READ(0x130094) & 0xa1) == 0xa1)
-		return;
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
-	POSTING_READ(FORCEWAKE_VLV);
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
 
-	if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
-		DRM_ERROR("Force wake wait timed out\n");
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
-	/* FIXME: confirm VLV behavior with Punit folks */
-	POSTING_READ(FORCEWAKE_VLV);
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
+	/* The below doubles as a POSTING_READ */
+	gen6_gt_check_fifodbg(dev_priv);
 }
 
 void intel_gt_init(struct drm_device *dev)
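
The comment on gen6_rps_limits() above captures the key constraint: GEN6_RP_INTERRUPT_LIMITS reads back as zero, so the limits word has to be rebuilt from scratch rather than read-modify-written, the requested frequency step is clamped into [min, max], and the down-interrupt limit is only programmed once the floor has been reached. The pure-function sketch below mirrors that logic; it is a model of the bit packing from the hunk above, not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp the requested frequency step and rebuild the limits word from
     * scratch, as gen6_rps_limits() does, instead of read-modify-writing a
     * register that reads back as zero. */
    static uint32_t model_rps_limits(uint8_t min_delay, uint8_t max_delay,
                                     uint8_t *val)
    {
            uint32_t limits = 0;

            if (*val >= max_delay)
                    *val = max_delay;
            limits |= (uint32_t)max_delay << 24;

            /* Program the down limit only once we sit at the floor, to avoid
             * the rc6-exit race described in the comment above. */
            if (*val <= min_delay) {
                    *val = min_delay;
                    limits |= (uint32_t)min_delay << 16;
            }

            return limits;
    }

    int main(void)
    {
            uint8_t val = 200;      /* request above the ceiling */
            uint32_t limits = model_rps_limits(4, 22, &val);

            printf("clamped to %u, limits 0x%08x\n",
                   (unsigned int)val, (unsigned int)limits);
            return 0;
    }
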
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1aef516cc6fa..ecbc5c5dbbbc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -261,6 +261,83 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+static int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	/*
+	 * Ensure that any following seqno writes only happen when the render
+	 * cache is indeed flushed.
+	 *
+	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
+	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
+	 * don't try to be clever and just set it unconditionally.
+	 */
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	/* Just flush everything.  Experiments have shown that reducing the
+	 * number of bits based on the write domains has little performance
+	 * impact.
+	 */
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE;
+
+		/* Workaround: we must issue a pipe_control with CS-stall bit
+		 * set before a pipe_control command that has the state cache
+		 * invalidate bit set. */
+		gen7_render_ring_cs_stall_wa(ring);
+	}
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -381,12 +458,12 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true);
+	ret = i915_gem_object_pin(obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
 	pc->gtt_offset = obj->gtt_offset;
-	pc->cpu_page =  kmap(obj->pages[0]);
+	pc->cpu_page =  kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL)
 		goto err_unpin;
 
@@ -413,7 +490,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 		return;
 
 	obj = pc->obj;
-	kunmap(obj->pages[0]);
+
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 
@@ -461,7 +539,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (IS_IVYBRIDGE(dev))
+	if (HAS_L3_GPU_CACHE(dev))
 		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 
 	return ret;
@@ -627,26 +705,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 }
 
 static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct drm_device *dev = ring->dev;
-
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
 	 * ACTHD) before reading the status page. */
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if (!lazy_coherency)
 		intel_ring_get_active_head(ring);
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	struct pipe_control *pc = ring->private;
 	return pc->cpu_page[0];
@@ -851,7 +927,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
 						GEN6_RENDER_L3_PARITY_ERROR));
 		else
@@ -874,7 +950,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 		else
 			I915_WRITE_IMR(ring, ~0);
@@ -950,7 +1026,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 	if (obj == NULL)
 		return;
 
-	kunmap(obj->pages[0]);
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
@@ -971,13 +1047,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true);
+	ret = i915_gem_object_pin(obj, 4096, true, false);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
 	ring->status_page.gfx_addr = obj->gtt_offset;
-	ring->status_page.page_addr = kmap(obj->pages[0]);
+	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
@@ -1009,7 +1085,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1029,7 +1104,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	ring->obj = obj;
 
-	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+	ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
 	if (ret)
 		goto err_unref;
 
@@ -1378,7 +1453,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
-		ring->flush = gen6_render_ring_flush;
+		ring->flush = gen7_render_ring_flush;
+		if (INTEL_INFO(dev)->gen == 6)
+			ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1480,7 +1557,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	ring->size = size;
 	ring->effective_size = ring->size;
@@ -1573,3 +1649,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
 	return intel_init_ring_buffer(dev, ring);
 }
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
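
The two helpers above replace the removed per-object gpu_write_list bookkeeping with a single gpu_caches_dirty flag per ring. A minimal sketch of the intended call pattern around a batch submission follows; submit_batch() and its parameters are illustrative only (not part of this patch), which merely adds the two exported helpers and the dispatch_execbuffer hook they sit around.

static int submit_batch(struct intel_ring_buffer *ring,
			u32 batch_start, u32 batch_len)
{
	int ret;

	/* Flush anything still marked dirty and invalidate the read caches
	 * so the batch cannot observe stale data. */
	ret = intel_ring_invalidate_all_caches(ring);
	if (ret)
		return ret;

	ret = ring->dispatch_execbuffer(ring, batch_start, batch_len);
	if (ret)
		return ret;

	/* The batch may have written through any GPU domain; note that so a
	 * later intel_ring_flush_all_caches() knows work is pending. */
	ring->gpu_caches_dirty = true;
	return 0;
}
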
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81fdad92..2ea7a311a1f0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@ struct  intel_ring_buffer {
 				  u32	flush_domains);
 	int		(*add_request)(struct intel_ring_buffer *ring,
 				       u32 *seqno);
-	u32		(*get_seqno)(struct intel_ring_buffer *ring);
+	/* Some chipsets are not quite as coherent as advertised and need
+	 * an expensive kick to force a true read of the up-to-date seqno.
+	 * However, the up-to-date seqno is not always required and the last
+	 * seen value is good enough. Note that the seqno will always be
+	 * monotonic, even if not coherent.
+	 */
+	u32		(*get_seqno)(struct intel_ring_buffer *ring,
+				     bool lazy_coherency);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length);
 	void		(*cleanup)(struct intel_ring_buffer *ring);
@@ -101,15 +108,6 @@ struct  intel_ring_buffer {
 	struct list_head request_list;
 
 	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
-	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	u32 outstanding_lazy_request;
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 void intel_ring_advance(struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
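
The lazy_coherency comment above is the point of the new get_seqno() signature. A hedged sketch of how a caller might use it, assuming the long-standing i915_seqno_passed() comparison helper; the wrapper itself is illustrative and not code from this patch:

static bool ring_seqno_complete(struct intel_ring_buffer *ring, u32 seqno)
{
	/* Cheap, possibly stale read first... */
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return true;

	/* ...and only pay for the coherency kick when the lazy value has
	 * not caught up yet. */
	return i915_seqno_passed(ring->get_seqno(ring, false), seqno);
}
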
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d251d9d7a06c..0007a4d9bf6e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -96,7 +96,7 @@ struct intel_sdvo {
 	/*
 	 * Hotplug activation bits for this device
 	 */
-	uint8_t hotplug_active[2];
+	uint16_t hotplug_active;
 
 	/**
 	 * This is used to select the color range of RBG outputs in HDMI mode.
@@ -627,6 +627,14 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
 				    &outputs, sizeof(outputs));
 }
 
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+					  u16 *outputs)
+{
+	return intel_sdvo_get_value(intel_sdvo,
+				    SDVO_CMD_GET_ACTIVE_OUTPUTS,
+				    outputs, sizeof(*outputs));
+}
+
 static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
 					       int mode)
 {
@@ -1141,51 +1149,132 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
 }
 
-static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
 {
-	struct drm_device *dev = encoder->dev;
+	struct intel_sdvo_connector *intel_sdvo_connector =
+		to_intel_sdvo_connector(&connector->base);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+	u16 active_outputs;
+
+	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+	if (active_outputs & intel_sdvo_connector->output_flag)
+		return true;
+	else
+		return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_sdvo->sdvo_reg);
+
+	if (!(tmp & SDVO_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	u32 temp;
+
+	intel_sdvo_set_active_outputs(intel_sdvo, 0);
+	if (0)
+		intel_sdvo_set_encoder_power_state(intel_sdvo,
+						   DRM_MODE_DPMS_OFF);
+
+	temp = I915_READ(intel_sdvo->sdvo_reg);
+	if ((temp & SDVO_ENABLE) != 0) {
+		intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+	}
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	u32 temp;
+	bool input1, input2;
+	int i;
+	u8 status;
+
+	temp = I915_READ(intel_sdvo->sdvo_reg);
+	if ((temp & SDVO_ENABLE) == 0)
+		intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+	for (i = 0; i < 2; i++)
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+	status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+	/* Warn if the device reported failure to sync.
+	 * A lot of SDVO devices fail to notify of sync, but it's
+	 * a given it the status is a success, we succeeded.
+	 * a given that if the status is a success, we succeeded.
+	if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+		DRM_DEBUG_KMS("First %s output reported failure to "
+				"sync\n", SDVO_NAME(intel_sdvo));
+	}
+
+	if (0)
+		intel_sdvo_set_encoder_power_state(intel_sdvo,
+						   DRM_MODE_DPMS_ON);
+	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_crtc *crtc;
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+	/* SDVO supports only 2 dpms states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = intel_sdvo->base.base.crtc;
+	if (!crtc) {
+		intel_sdvo->base.connectors_active = false;
+		return;
+	}
 
 	if (mode != DRM_MODE_DPMS_ON) {
 		intel_sdvo_set_active_outputs(intel_sdvo, 0);
 		if (0)
 			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
 
-		if (mode == DRM_MODE_DPMS_OFF) {
-			temp = I915_READ(intel_sdvo->sdvo_reg);
-			if ((temp & SDVO_ENABLE) != 0) {
-				intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
-			}
-		}
+		intel_sdvo->base.connectors_active = false;
+
+		intel_crtc_update_dpms(crtc);
 	} else {
-		bool input1, input2;
-		int i;
-		u8 status;
-
-		temp = I915_READ(intel_sdvo->sdvo_reg);
-		if ((temp & SDVO_ENABLE) == 0)
-			intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
-		for (i = 0; i < 2; i++)
-			intel_wait_for_vblank(dev, intel_crtc->pipe);
-
-		status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
-		/* Warn if the device reported failure to sync.
-		 * A lot of SDVO devices fail to notify of sync, but it's
-		 * a given it the status is a success, we succeeded.
-		 */
-		if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
-			DRM_DEBUG_KMS("First %s output reported failure to "
-					"sync\n", SDVO_NAME(intel_sdvo));
-		}
+		intel_sdvo->base.connectors_active = true;
+
+		intel_crtc_update_dpms(crtc);
 
 		if (0)
 			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
 		intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
 	}
-	return;
+
+	intel_modeset_check_state(connector->dev);
 }
 
 static int intel_sdvo_mode_valid(struct drm_connector *connector,
@@ -1250,25 +1339,29 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
 	return true;
 }
 
-static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
 {
 	struct drm_device *dev = intel_sdvo->base.base.dev;
-	u8 response[2];
+	uint16_t hotplug;
 
 	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
 	 * on the line. */
 	if (IS_I945G(dev) || IS_I945GM(dev))
-		return false;
+		return 0;
 
-	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
-				    &response, 2) && response[0];
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+					&hotplug, sizeof(hotplug)))
+		return 0;
+
+	return hotplug;
 }
 
 static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
 {
 	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
 
-	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
+	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+			&intel_sdvo->hotplug_active, 2);
 }
 
 static bool
@@ -1344,7 +1437,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
 			}
 		} else
 			status = connector_status_disconnected;
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -1418,7 +1510,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 			else
 				ret = connector_status_disconnected;
 
-			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		} else
 			ret = connector_status_connected;
@@ -1464,7 +1555,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 			drm_add_edid_modes(connector, edid);
 		}
 
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 }
@@ -1836,8 +1926,8 @@ set_value:
 done:
 	if (intel_sdvo->base.base.crtc) {
 		struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
-					 crtc->y, crtc->fb);
+		intel_set_mode(crtc, &crtc->mode,
+			       crtc->x, crtc->y, crtc->fb);
 	}
 
 	return 0;
@@ -1845,15 +1935,13 @@ done:
 }
 
 static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
-	.dpms = intel_sdvo_dpms,
 	.mode_fixup = intel_sdvo_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_sdvo_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_sdvo_dpms,
 	.detect = intel_sdvo_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_sdvo_set_property,
@@ -2025,6 +2113,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
 	connector->base.base.interlace_allowed = 1;
 	connector->base.base.doublescan_allowed = 0;
 	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
 
 	intel_connector_attach_encoder(&connector->base, &encoder->base);
 	drm_sysfs_connector_add(&connector->base.base);
@@ -2063,17 +2152,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 
 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
-	if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+	if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+		intel_sdvo_connector->output_flag) {
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
-		intel_sdvo->hotplug_active[0] |= 1 << device;
+		intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
 		/* Some SDVO devices have one-shot hotplug interrupts.
 		 * Ensure that they get re-enabled when an interrupt happens.
 		 */
 		intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
 		intel_sdvo_enable_hotplug(intel_encoder);
-	}
-	else
+	} else {
 		connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+	}
 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
@@ -2081,8 +2171,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo->is_hdmi = true;
 	}
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (intel_sdvo->is_hdmi)
@@ -2113,7 +2202,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 
 	intel_sdvo->is_tv = true;
 	intel_sdvo->base.needs_tv_clock = true;
-	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+	intel_sdvo->base.cloneable = false;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
@@ -2156,8 +2245,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
 	}
 
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector,
 				  intel_sdvo);
@@ -2189,8 +2277,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
 	}
 
-	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
-				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+	/* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
+	 * as opposed to native LVDS, where we upscale with the panel-fitter
+	 * (and hence only the native LVDS resolution could be cloned). */
+	intel_sdvo->base.cloneable = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
@@ -2575,6 +2665,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 
 	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
+	intel_encoder->disable = intel_disable_sdvo;
+	intel_encoder->enable = intel_enable_sdvo;
+	intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+
 	/* In default case sdvo lvds is false */
 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
 		goto err;
@@ -2589,7 +2683,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 	/* Only enable the hotplug irq if we need it, to work around noisy
 	 * hotplug lines.
 	 */
-	if (intel_sdvo->hotplug_active[0])
+	if (intel_sdvo->hotplug_active)
 		dev_priv->hotplug_supported_mask |= hotplug_mask;
 
 	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
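
The get_hw_state hooks added here for SDVO (and below for the TV encoder) exist so the modeset core can compare its software tracking against what the registers actually report. A rough sketch of such a cross-check, using only the hooks introduced in this patch; check_encoder() itself is illustrative and not the real intel_modeset_check_state():

static void check_encoder(struct intel_encoder *encoder)
{
	enum pipe pipe;
	bool enabled;

	/* Ask the encoder what the hardware is really doing... */
	enabled = encoder->get_hw_state(encoder, &pipe);

	/* ...and complain if that disagrees with the software state. */
	WARN(enabled != (encoder->base.crtc != NULL),
	     "encoder hw/sw state mismatch (hw %s)\n",
	     enabled ? "enabled" : "disabled");
}
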
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ccfb2ff4c31d..62bb048c135e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -835,22 +835,37 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
 			    base);
 }
 
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ(TV_CTL);
+
+	if (!(tmp & TV_ENC_ENABLE))
+		return false;
+
+	*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
 static void
-intel_tv_dpms(struct drm_encoder *encoder, int mode)
+intel_enable_tv(struct intel_encoder *encoder)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
-		break;
-	}
+	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
 }
 
 static const struct tv_mode *
@@ -894,17 +909,14 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
 		    const struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
-	struct drm_device *dev = encoder->dev;
 	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
-	struct intel_encoder *other_encoder;
 
 	if (!tv_mode)
 		return false;
 
-	for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
-		if (&other_encoder->base != encoder)
-			return false;
+	if (intel_encoder_check_is_cloned(&intel_tv->base))
+		return false;
 
 	adjusted_mode->clock = tv_mode->clock;
 	return true;
@@ -1302,12 +1314,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 	if (force) {
 		struct intel_load_detect_pipe tmp;
 
-		if (intel_get_load_detect_pipe(&intel_tv->base, connector,
-					       &mode, &tmp)) {
+		if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
 			type = intel_tv_detect_type(intel_tv, connector);
-			intel_release_load_detect_pipe(&intel_tv->base,
-						       connector,
-						       &tmp);
+			intel_release_load_detect_pipe(connector, &tmp);
 		} else
 			return connector_status_unknown;
 	} else
@@ -1473,22 +1482,20 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 	}
 
 	if (changed && crtc)
-		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
-				crtc->y, crtc->fb);
+		intel_set_mode(crtc, &crtc->mode,
+			       crtc->x, crtc->y, crtc->fb);
 out:
 	return ret;
 }
 
 static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
-	.dpms = intel_tv_dpms,
 	.mode_fixup = intel_tv_mode_fixup,
-	.prepare = intel_encoder_prepare,
 	.mode_set = intel_tv_mode_set,
-	.commit = intel_encoder_commit,
+	.disable = intel_encoder_noop,
 };
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = intel_connector_dpms,
 	.detect = intel_tv_detect,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
@@ -1618,10 +1625,15 @@ intel_tv_init(struct drm_device *dev)
 	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
 			 DRM_MODE_ENCODER_TVDAC);
 
+	intel_encoder->enable = intel_enable_tv;
+	intel_encoder->disable = intel_disable_tv;
+	intel_encoder->get_hw_state = intel_tv_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+	intel_encoder->cloneable = false;
 	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
 	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
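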
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 73868d0c25ae..5ea5033eae0a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -195,7 +195,6 @@ struct mga_device {
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 
 	u32 reg_1e24; /* SE model number */
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 3d429de0771a..d3d99a28ddef 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1398,7 +1398,6 @@ static int mga_vga_get_modes(struct drm_connector *connector)
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
-		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 97a81260485a..8a55beeb8bdc 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -17,6 +17,34 @@ config DRM_NOUVEAU
 	help
 	  Choose this option for open-source nVidia support.
 
+config NOUVEAU_DEBUG
+	int "Maximum debug level"
+	depends on DRM_NOUVEAU
+	range 0 7
+	default 5
+	help
+	  Selects the maximum debug level to compile support for.
+
+	  0 - fatal
+	  1 - error
+	  2 - warning
+	  3 - info
+	  4 - debug
+	  5 - trace (recommended)
+	  6 - paranoia
+	  7 - spam
+
+	  The paranoia and spam levels will add a lot of extra checks which
+	  may slow down driver operation.
+
+config NOUVEAU_DEBUG_DEFAULT
+	int "Default debug level"
+	depends on DRM_NOUVEAU
+	range 0 7
+	default 3
+	help
+	  Selects the debug level that is used by default at runtime.
+
 config DRM_NOUVEAU_BACKLIGHT
 	bool "Support for backlight control"
 	depends on DRM_NOUVEAU
@@ -25,14 +53,6 @@ config DRM_NOUVEAU_BACKLIGHT
 	  Say Y here if you want to control the backlight of your display
 	  (e.g. a laptop panel).
 
-config DRM_NOUVEAU_DEBUG
-	bool "Build in Nouveau's debugfs support"
-	depends on DRM_NOUVEAU && DEBUG_FS
-	default y
-	help
-	  Say Y here if you want Nouveau to output debugging information
-	  via debugfs.
-
 menu "I2C encoder or helper chips"
      depends on DRM && DRM_KMS_HELPER && I2C
 
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1cece6a78f39..a990df4d6c04 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -3,49 +3,190 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
-             nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
-             nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
-             nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
-             nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
-             nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-             nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
-	     nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
-	     nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
-	     nouveau_abi16.o \
-             nv04_timer.o \
-             nv04_mc.o nv40_mc.o nv50_mc.o \
-             nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
-             nv50_fb.o nvc0_fb.o \
-             nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \
-             nv84_fifo.o nvc0_fifo.o nve0_fifo.o \
-             nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
-             nv04_software.o nv50_software.o nvc0_software.o \
-             nv04_graph.o nv10_graph.o nv20_graph.o \
-             nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
-             nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \
-             nv84_crypt.o nv98_crypt.o \
-             nva3_copy.o nvc0_copy.o \
-             nv31_mpeg.o nv50_mpeg.o \
-             nv84_bsp.o \
-             nv84_vp.o \
-             nv98_ppp.o \
-             nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
-             nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
-             nv04_crtc.o nv04_display.o nv04_cursor.o \
-             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
-             nv50_cursor.o nv50_display.o \
-             nvd0_display.o \
-             nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
-             nv10_gpio.o nv50_gpio.o \
-	     nv50_calc.o \
-	     nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
-	     nv50_vram.o nvc0_vram.o \
-	     nv50_vm.o nvc0_vm.o nouveau_prime.o
-
-nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
+ccflags-y += -I$(src)/core/include
+ccflags-y += -I$(src)/core
+ccflags-y += -I$(src)
+
+nouveau-y := core/core/client.o
+nouveau-y += core/core/engctx.o
+nouveau-y += core/core/engine.o
+nouveau-y += core/core/enum.o
+nouveau-y += core/core/gpuobj.o
+nouveau-y += core/core/handle.o
+nouveau-y += core/core/mm.o
+nouveau-y += core/core/namedb.o
+nouveau-y += core/core/object.o
+nouveau-y += core/core/option.o
+nouveau-y += core/core/parent.o
+nouveau-y += core/core/printk.o
+nouveau-y += core/core/ramht.o
+nouveau-y += core/core/subdev.o
+
+nouveau-y += core/subdev/bar/base.o
+nouveau-y += core/subdev/bar/nv50.o
+nouveau-y += core/subdev/bar/nvc0.o
+nouveau-y += core/subdev/bios/base.o
+nouveau-y += core/subdev/bios/bit.o
+nouveau-y += core/subdev/bios/conn.o
+nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/dp.o
+nouveau-y += core/subdev/bios/extdev.o
+nouveau-y += core/subdev/bios/gpio.o
+nouveau-y += core/subdev/bios/i2c.o
+nouveau-y += core/subdev/bios/init.o
+nouveau-y += core/subdev/bios/mxm.o
+nouveau-y += core/subdev/bios/perf.o
+nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/clock/nv04.o
+nouveau-y += core/subdev/clock/nv40.o
+nouveau-y += core/subdev/clock/nv50.o
+nouveau-y += core/subdev/clock/nva3.o
+nouveau-y += core/subdev/clock/nvc0.o
+nouveau-y += core/subdev/clock/pllnv04.o
+nouveau-y += core/subdev/clock/pllnva3.o
+nouveau-y += core/subdev/device/base.o
+nouveau-y += core/subdev/device/nv04.o
+nouveau-y += core/subdev/device/nv10.o
+nouveau-y += core/subdev/device/nv20.o
+nouveau-y += core/subdev/device/nv30.o
+nouveau-y += core/subdev/device/nv40.o
+nouveau-y += core/subdev/device/nv50.o
+nouveau-y += core/subdev/device/nvc0.o
+nouveau-y += core/subdev/device/nve0.o
+nouveau-y += core/subdev/devinit/base.o
+nouveau-y += core/subdev/devinit/nv04.o
+nouveau-y += core/subdev/devinit/nv05.o
+nouveau-y += core/subdev/devinit/nv10.o
+nouveau-y += core/subdev/devinit/nv1a.o
+nouveau-y += core/subdev/devinit/nv20.o
+nouveau-y += core/subdev/devinit/nv50.o
+nouveau-y += core/subdev/fb/base.o
+nouveau-y += core/subdev/fb/nv04.o
+nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/gpio/base.o
+nouveau-y += core/subdev/gpio/nv10.o
+nouveau-y += core/subdev/gpio/nv50.o
+nouveau-y += core/subdev/gpio/nvd0.o
+nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/i2c/aux.o
+nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/ibus/nvc0.o
+nouveau-y += core/subdev/ibus/nve0.o
+nouveau-y += core/subdev/instmem/base.o
+nouveau-y += core/subdev/instmem/nv04.o
+nouveau-y += core/subdev/instmem/nv40.o
+nouveau-y += core/subdev/instmem/nv50.o
+nouveau-y += core/subdev/ltcg/nvc0.o
+nouveau-y += core/subdev/mc/base.o
+nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/mc/nv98.o
+nouveau-y += core/subdev/mc/nvc0.o
+nouveau-y += core/subdev/mxm/base.o
+nouveau-y += core/subdev/mxm/mxms.o
+nouveau-y += core/subdev/mxm/nv50.o
+nouveau-y += core/subdev/therm/base.o
+nouveau-y += core/subdev/therm/fan.o
+nouveau-y += core/subdev/therm/ic.o
+nouveau-y += core/subdev/therm/nv40.o
+nouveau-y += core/subdev/therm/nv50.o
+nouveau-y += core/subdev/therm/temp.o
+nouveau-y += core/subdev/timer/base.o
+nouveau-y += core/subdev/timer/nv04.o
+nouveau-y += core/subdev/vm/base.o
+nouveau-y += core/subdev/vm/nv04.o
+nouveau-y += core/subdev/vm/nv41.o
+nouveau-y += core/subdev/vm/nv44.o
+nouveau-y += core/subdev/vm/nv50.o
+nouveau-y += core/subdev/vm/nvc0.o
+
+nouveau-y += core/engine/dmaobj/base.o
+nouveau-y += core/engine/dmaobj/nv04.o
+nouveau-y += core/engine/dmaobj/nv50.o
+nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/copy/nva3.o
+nouveau-y += core/engine/copy/nvc0.o
+nouveau-y += core/engine/copy/nve0.o
+nouveau-y += core/engine/crypt/nv84.o
+nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/disp/nv04.o
+nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/vga.o
+nouveau-y += core/engine/fifo/base.o
+nouveau-y += core/engine/fifo/nv04.o
+nouveau-y += core/engine/fifo/nv10.o
+nouveau-y += core/engine/fifo/nv17.o
+nouveau-y += core/engine/fifo/nv40.o
+nouveau-y += core/engine/fifo/nv50.o
+nouveau-y += core/engine/fifo/nv84.o
+nouveau-y += core/engine/fifo/nvc0.o
+nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/graph/ctxnv40.o
+nouveau-y += core/engine/graph/ctxnv50.o
+nouveau-y += core/engine/graph/ctxnvc0.o
+nouveau-y += core/engine/graph/ctxnve0.o
+nouveau-y += core/engine/graph/nv04.o
+nouveau-y += core/engine/graph/nv10.o
+nouveau-y += core/engine/graph/nv20.o
+nouveau-y += core/engine/graph/nv25.o
+nouveau-y += core/engine/graph/nv2a.o
+nouveau-y += core/engine/graph/nv30.o
+nouveau-y += core/engine/graph/nv34.o
+nouveau-y += core/engine/graph/nv35.o
+nouveau-y += core/engine/graph/nv40.o
+nouveau-y += core/engine/graph/nv50.o
+nouveau-y += core/engine/graph/nvc0.o
+nouveau-y += core/engine/graph/nve0.o
+nouveau-y += core/engine/mpeg/nv31.o
+nouveau-y += core/engine/mpeg/nv40.o
+nouveau-y += core/engine/mpeg/nv50.o
+nouveau-y += core/engine/mpeg/nv84.o
+nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/software/nv04.o
+nouveau-y += core/engine/software/nv10.o
+nouveau-y += core/engine/software/nv50.o
+nouveau-y += core/engine/software/nvc0.o
+nouveau-y += core/engine/vp/nv84.o
+
+# drm/core
+nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
+nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
+nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
+nouveau-y += nouveau_prime.o nouveau_abi16.o
+nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
+
+# drm/kms
+nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
+nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
+
+# drm/kms/nv04:nv50
+nouveau-y += nouveau_hw.o nouveau_calc.o
+nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
+nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
+
+# drm/kms/nv50-
+nouveau-y += nv50_display.o nvd0_display.o
+nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
+nouveau-y += nv50_evo.o
+
+# drm/pm
+nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
+nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
+nouveau-y += nouveau_mem.o
+
+# other random bits
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
-nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
 nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
 
 obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
new file mode 100644
index 000000000000..c617f0480071
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/option.h>
+
+#include <subdev/device.h>
+
+static void
+nouveau_client_dtor(struct nouveau_object *object)
+{
+	struct nouveau_client *client = (void *)object;
+	nouveau_object_ref(NULL, &client->device);
+	nouveau_handle_destroy(client->root);
+	nouveau_namedb_destroy(&client->base);
+}
+
+static struct nouveau_oclass
+nouveau_client_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.dtor = nouveau_client_dtor,
+	},
+};
+
+int
+nouveau_client_create_(const char *name, u64 devname, const char *cfg,
+		       const char *dbg, int length, void **pobject)
+{
+	struct nouveau_object *device;
+	struct nouveau_client *client;
+	int ret;
+
+	device = (void *)nouveau_device_find(devname);
+	if (!device)
+		return -ENODEV;
+
+	ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
+				     NV_CLIENT_CLASS, nouveau_device_sclass,
+				     0, length, pobject);
+	client = *pobject;
+	if (ret)
+		return ret;
+
+	ret = nouveau_handle_create(nv_object(client), ~0, ~0,
+				    nv_object(client), &client->root);
+	if (ret) {
+		nouveau_namedb_destroy(&client->base);
+		return ret;
+	}
+
+	/* prevent init/fini being called, the OS is in charge of this */
+	atomic_set(&nv_object(client)->usecount, 2);
+
+	nouveau_object_ref(device, &client->device);
+	snprintf(client->name, sizeof(client->name), "%s", name);
+	client->debug = nouveau_dbgopt(dbg, "CLIENT");
+	return 0;
+}
+
+int
+nouveau_client_init(struct nouveau_client *client)
+{
+	int ret;
+	nv_debug(client, "init running\n");
+	ret = nouveau_handle_init(client->root);
+	nv_debug(client, "init completed with %d\n", ret);
+	return ret;
+}
+
+int
+nouveau_client_fini(struct nouveau_client *client, bool suspend)
+{
+	const char *name[2] = { "fini", "suspend" };
+	int ret;
+
+	nv_debug(client, "%s running\n", name[suspend]);
+	ret = nouveau_handle_fini(client->root, suspend);
+	nv_debug(client, "%s completed with %d\n", name[suspend], ret);
+	return ret;
+}
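
Because the constructor bumps usecount to 2, init/fini are never triggered by normal object refcounting; the driver is expected to call them explicitly around suspend/resume, roughly as sketched below (driver-side code, not part of this file):

static int example_suspend(struct nouveau_client *client)
{
	/* the second argument distinguishes suspend from final teardown */
	return nouveau_client_fini(client, true);
}

static int example_resume(struct nouveau_client *client)
{
	return nouveau_client_init(client);
}
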
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
new file mode 100644
index 000000000000..e41b10d5eb59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/client.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+
+static inline int
+nouveau_engctx_exists(struct nouveau_object *parent,
+		      struct nouveau_engine *engine, void **pobject)
+{
+	struct nouveau_engctx *engctx;
+	struct nouveau_object *parctx;
+
+	list_for_each_entry(engctx, &engine->contexts, head) {
+		parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
+		if (parctx == parent) {
+			atomic_inc(&nv_object(engctx)->refcount);
+			*pobject = engctx;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+int
+nouveau_engctx_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engobj,
+		       struct nouveau_oclass *oclass,
+		       struct nouveau_object *pargpu,
+		       u32 size, u32 align, u32 flags,
+		       int length, void **pobject)
+{
+	struct nouveau_client *client = nouveau_client(parent);
+	struct nouveau_engine *engine = nv_engine(engobj);
+	struct nouveau_object *engctx;
+	unsigned long save;
+	int ret;
+
+	/* check if this engine already has a context for the parent object,
+	 * and reference it instead of creating a new one
+	 */
+	spin_lock_irqsave(&engine->lock, save);
+	ret = nouveau_engctx_exists(parent, engine, pobject);
+	spin_unlock_irqrestore(&engine->lock, save);
+	if (ret)
+		return ret;
+
+	/* create the new context, supports creating both raw objects and
+	 * objects backed by instance memory
+	 */
+	if (size) {
+		ret = nouveau_gpuobj_create_(parent, engobj, oclass,
+					     NV_ENGCTX_CLASS,
+					     pargpu, size, align, flags,
+					     length, pobject);
+	} else {
+		ret = nouveau_object_create_(parent, engobj, oclass,
+					     NV_ENGCTX_CLASS, length, pobject);
+	}
+
+	engctx = *pobject;
+	if (ret)
+		return ret;
+
+	/* must take the lock again and re-check a context doesn't already
+	 * exist (in case of a race) - the lock had to be dropped before as
+	 * it's not possible to allocate the object with it held.
+	 */
+	spin_lock_irqsave(&engine->lock, save);
+	ret = nouveau_engctx_exists(parent, engine, pobject);
+	if (ret) {
+		spin_unlock_irqrestore(&engine->lock, save);
+		nouveau_object_ref(NULL, &engctx);
+		return ret;
+	}
+
+	if (client->vm)
+		atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
+	list_add(&nv_engctx(engctx)->head, &engine->contexts);
+	nv_engctx(engctx)->addr = ~0ULL;
+	spin_unlock_irqrestore(&engine->lock, save);
+	return 0;
+}
+
+void
+nouveau_engctx_destroy(struct nouveau_engctx *engctx)
+{
+	struct nouveau_object *engobj = nv_object(engctx)->engine;
+	struct nouveau_engine *engine = nv_engine(engobj);
+	struct nouveau_client *client = nouveau_client(engctx);
+	unsigned long save;
+
+	nouveau_gpuobj_unmap(&engctx->vma);
+	spin_lock_irqsave(&engine->lock, save);
+	list_del(&engctx->head);
+	spin_unlock_irqrestore(&engine->lock, save);
+
+	if (client->vm)
+		atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
+
+	if (engctx->base.size)
+		nouveau_gpuobj_destroy(&engctx->base);
+	else
+		nouveau_object_destroy(&engctx->base.base);
+}
+
+int
+nouveau_engctx_init(struct nouveau_engctx *engctx)
+{
+	struct nouveau_object *object = nv_object(engctx);
+	struct nouveau_subdev *subdev = nv_subdev(object->engine);
+	struct nouveau_object *parent;
+	struct nouveau_subdev *pardev;
+	int ret;
+
+	ret = nouveau_gpuobj_init(&engctx->base);
+	if (ret)
+		return ret;
+
+	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
+	pardev = nv_subdev(parent->engine);
+	if (nv_parent(parent)->context_attach) {
+		mutex_lock(&pardev->mutex);
+		ret = nv_parent(parent)->context_attach(parent, object);
+		mutex_unlock(&pardev->mutex);
+	}
+
+	if (ret) {
+		nv_error(parent, "failed to attach %s context, %d\n",
+			 subdev->name, ret);
+		return ret;
+	}
+
+	nv_debug(parent, "attached %s context\n", subdev->name);
+	return 0;
+}
+
+int
+nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
+{
+	struct nouveau_object *object = nv_object(engctx);
+	struct nouveau_subdev *subdev = nv_subdev(object->engine);
+	struct nouveau_object *parent;
+	struct nouveau_subdev *pardev;
+	int ret = 0;
+
+	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
+	pardev = nv_subdev(parent->engine);
+	if (nv_parent(parent)->context_detach) {
+		mutex_lock(&pardev->mutex);
+		ret = nv_parent(parent)->context_detach(parent, suspend, object);
+		mutex_unlock(&pardev->mutex);
+	}
+
+	if (ret) {
+		nv_error(parent, "failed to detach %s context, %d\n",
+			 subdev->name, ret);
+		return ret;
+	}
+
+	nv_debug(parent, "detached %s context\n", subdev->name);
+	return nouveau_gpuobj_fini(&engctx->base, suspend);
+}
+
+void
+_nouveau_engctx_dtor(struct nouveau_object *object)
+{
+	nouveau_engctx_destroy(nv_engctx(object));
+}
+
+int
+_nouveau_engctx_init(struct nouveau_object *object)
+{
+	return nouveau_engctx_init(nv_engctx(object));
+}
+
+
+int
+_nouveau_engctx_fini(struct nouveau_object *object, bool suspend)
+{
+	return nouveau_engctx_fini(nv_engctx(object), suspend);
+}
+
+struct nouveau_object *
+nouveau_engctx_get(struct nouveau_engine *engine, u64 addr)
+{
+	struct nouveau_engctx *engctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->lock, flags);
+	list_for_each_entry(engctx, &engine->contexts, head) {
+		if (engctx->addr == addr) {
+			engctx->save = flags;
+			return nv_object(engctx);
+		}
+	}
+	spin_unlock_irqrestore(&engine->lock, flags);
+	return NULL;
+}
+
+void
+nouveau_engctx_put(struct nouveau_object *object)
+{
+	if (object) {
+		struct nouveau_engine *engine = nv_engine(object->engine);
+		struct nouveau_engctx *engctx = nv_engctx(object);
+		spin_unlock_irqrestore(&engine->lock, engctx->save);
+	}
+}
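
nouveau_engctx_create_() above is an instance of the usual check / drop-lock / allocate / re-check pattern: the engine lock cannot be held across an allocation that may sleep, so the lookup has to be repeated after the lock is reacquired. A stripped-down sketch, with ctx_lookup(), ctx_alloc() and ctx_free() standing in as hypothetical helpers:

static int ctx_get_or_create(struct nouveau_engine *engine,
			     struct nouveau_object *parent, void **pobject)
{
	unsigned long save;
	void *new;

	spin_lock_irqsave(&engine->lock, save);
	if (ctx_lookup(engine, parent, pobject)) {	/* fast path */
		spin_unlock_irqrestore(&engine->lock, save);
		return 0;
	}
	spin_unlock_irqrestore(&engine->lock, save);

	new = ctx_alloc(parent);		/* may sleep: no lock held */
	if (!new)
		return -ENOMEM;

	spin_lock_irqsave(&engine->lock, save);
	if (ctx_lookup(engine, parent, pobject)) {	/* lost the race */
		spin_unlock_irqrestore(&engine->lock, save);
		ctx_free(new);
		return 0;
	}
	*pobject = new;				/* publish while locked */
	spin_unlock_irqrestore(&engine->lock, save);
	return 0;
}
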
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
new file mode 100644
index 000000000000..09b3bd502fd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/engine.h>
+#include <core/option.h>
+
+int
+nouveau_engine_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engobj,
+		       struct nouveau_oclass *oclass, bool enable,
+		       const char *iname, const char *fname,
+		       int length, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_engine *engine;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
+				     iname, fname, length, pobject);
+	engine = *pobject;
+	if (ret)
+		return ret;
+
+	if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
+		if (!enable)
+			nv_warn(engine, "disabled, %s=1 to enable\n", iname);
+		return -ENODEV;
+	}
+
+	INIT_LIST_HEAD(&engine->contexts);
+	spin_lock_init(&engine->lock);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/core/core/enum.c
index e51b51503baa..7cc7133d82de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -25,27 +25,8 @@
  *
  */
 
-#include <linux/ratelimit.h>
-
-#include "nouveau_util.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-void
-nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
-{
-	while (bf->name) {
-		if (value & bf->mask) {
-			printk(" %s", bf->name);
-			value &= ~bf->mask;
-		}
-
-		bf++;
-	}
-
-	if (value)
-		printk(" (unknown bits 0x%08x)", value);
-}
+#include <core/os.h>
+#include <core/enum.h>
 
 const struct nouveau_enum *
 nouveau_enum_find(const struct nouveau_enum *en, u32 value)
@@ -63,16 +44,24 @@ void
 nouveau_enum_print(const struct nouveau_enum *en, u32 value)
 {
 	en = nouveau_enum_find(en, value);
-	if (en) {
+	if (en)
 		printk("%s", en->name);
-		return;
-	}
-
-	printk("(unknown enum 0x%08x)", value);
+	else
+		printk("(unknown enum 0x%08x)", value);
 }
 
-int
-nouveau_ratelimit(void)
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
 {
-	return __ratelimit(&nouveau_ratelimit_state);
+	while (bf->name) {
+		if (value & bf->mask) {
+			printk(" %s", bf->name);
+			value &= ~bf->mask;
+		}
+
+		bf++;
+	}
+
+	if (value)
+		printk(" (unknown bits 0x%08x)", value);
 }
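
The two print helpers are meant to be driven by small static tables. An illustrative table and caller follow (not part of this file); designated initialisers are used so only the field names the helpers dereference need to be assumed:

static const struct nouveau_bitfield example_intr_bits[] = {
	{ .mask = 0x00000001, .name = "FIFO" },
	{ .mask = 0x00001000, .name = "GRAPH" },
	{}	/* terminator: .name == NULL stops the loop */
};

static void example_report_intr(u32 stat)
{
	printk(KERN_ERR "nouveau: unhandled intr:");
	nouveau_bitfield_print(example_intr_bits, stat);
	printk("\n");
}
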
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
new file mode 100644
index 000000000000..1f34549aff18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/gpuobj.h>
+
+#include <subdev/instmem.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+void
+nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
+{
+	int i;
+
+	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
+		for (i = 0; i < gpuobj->size; i += 4)
+			nv_wo32(gpuobj, i, 0x00000000);
+	}
+
+	if (gpuobj->heap.block_size)
+		nouveau_mm_fini(&gpuobj->heap);
+
+	nouveau_object_destroy(&gpuobj->base);
+}
+
+int
+nouveau_gpuobj_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       struct nouveau_object *pargpu,
+		       u32 size, u32 align, u32 flags,
+		       int length, void **pobject)
+{
+	struct nouveau_instmem *imem = nouveau_instmem(parent);
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nouveau_gpuobj *gpuobj;
+	struct nouveau_mm *heap = NULL;
+	int ret, i;
+	u64 addr;
+
+	*pobject = NULL;
+
+	if (pargpu) {
+		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
+			if (nv_gpuobj(pargpu)->heap.block_size)
+				break;
+			pargpu = pargpu->parent;
+		}
+
+		if (unlikely(pargpu == NULL)) {
+			nv_error(parent, "no gpuobj heap\n");
+			return -EINVAL;
+		}
+
+		addr =  nv_gpuobj(pargpu)->addr;
+		heap = &nv_gpuobj(pargpu)->heap;
+		atomic_inc(&parent->refcount);
+	} else {
+		ret = imem->alloc(imem, parent, size, align, &parent);
+		pargpu = parent;
+		if (ret)
+			return ret;
+
+		addr = nv_memobj(pargpu)->addr;
+		size = nv_memobj(pargpu)->size;
+
+		if (bar && bar->alloc) {
+			struct nouveau_instobj *iobj = (void *)parent;
+			struct nouveau_mem **mem = (void *)(iobj + 1);
+			struct nouveau_mem *node = *mem;
+			if (!bar->alloc(bar, parent, node, &pargpu)) {
+				nouveau_object_ref(NULL, &parent);
+				parent = pargpu;
+			}
+		}
+	}
+
+	ret = nouveau_object_create_(parent, engine, oclass, pclass |
+				     NV_GPUOBJ_CLASS, length, pobject);
+	nouveau_object_ref(NULL, &parent);
+	gpuobj = *pobject;
+	if (ret)
+		return ret;
+
+	gpuobj->parent = pargpu;
+	gpuobj->flags = flags;
+	gpuobj->addr = addr;
+	gpuobj->size = size;
+
+	if (heap) {
+		ret = nouveau_mm_head(heap, 1, size, size,
+				      max(align, (u32)1), &gpuobj->node);
+		if (ret)
+			return ret;
+
+		gpuobj->addr += gpuobj->node->offset;
+	}
+
+	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
+		ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
+		for (i = 0; i < gpuobj->size; i += 4)
+			nv_wo32(gpuobj, i, 0x00000000);
+	}
+
+	return ret;
+}
+
+struct nouveau_gpuobj_class {
+	struct nouveau_object *pargpu;
+	u64 size;
+	u32 align;
+	u32 flags;
+};
+
+static int
+_nouveau_gpuobj_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj_class *args = data;
+	struct nouveau_gpuobj *object;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
+				    args->size, args->align, args->flags,
+				    &object);
+	*pobject = nv_object(object);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+_nouveau_gpuobj_dtor(struct nouveau_object *object)
+{
+	nouveau_gpuobj_destroy(nv_gpuobj(object));
+}
+
+int
+_nouveau_gpuobj_init(struct nouveau_object *object)
+{
+	return nouveau_gpuobj_init(nv_gpuobj(object));
+}
+
+int
+_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
+{
+	return nouveau_gpuobj_fini(nv_gpuobj(object), suspend);
+}
+
+u32
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+{
+	struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
+	struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+	if (gpuobj->node)
+		addr += gpuobj->node->offset;
+	return pfuncs->rd32(gpuobj->parent, addr);
+}
+
+void
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
+	struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+	if (gpuobj->node)
+		addr += gpuobj->node->offset;
+	pfuncs->wr32(gpuobj->parent, addr, data);
+}
+
+static struct nouveau_oclass
+_nouveau_gpuobj_oclass = {
+	.handle = 0x00000000,
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_gpuobj_ctor,
+		.dtor = _nouveau_gpuobj_dtor,
+		.init = _nouveau_gpuobj_init,
+		.fini = _nouveau_gpuobj_fini,
+		.rd32 = _nouveau_gpuobj_rd32,
+		.wr32 = _nouveau_gpuobj_wr32,
+	},
+};
+
+int
+nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+		   u32 size, u32 align, u32 flags,
+		   struct nouveau_gpuobj **pgpuobj)
+{
+	struct nouveau_object *engine = parent;
+	struct nouveau_gpuobj_class args = {
+		.pargpu = pargpu,
+		.size = size,
+		.align = align,
+		.flags = flags,
+	};
+
+	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
+		engine = engine->engine;
+	BUG_ON(engine == NULL);
+
+	return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
+				   &args, sizeof(args),
+				   (struct nouveau_object **)pgpuobj);
+}
+
+int
+nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access,
+		   struct nouveau_vma *vma)
+{
+	struct nouveau_bar *bar = nouveau_bar(gpuobj);
+	int ret = -EINVAL;
+
+	if (bar && bar->umap) {
+		struct nouveau_instobj *iobj = (void *)
+			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+		struct nouveau_mem **mem = (void *)(iobj + 1);
+		ret = bar->umap(bar, *mem, access, vma);
+	}
+
+	return ret;
+}
+
+int
+nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
+		      u32 access, struct nouveau_vma *vma)
+{
+	struct nouveau_instobj *iobj = (void *)
+		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+	struct nouveau_mem **mem = (void *)(iobj + 1);
+	int ret;
+
+	ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, *mem);
+	return 0;
+}
+
+void
+nouveau_gpuobj_unmap(struct nouveau_vma *vma)
+{
+	if (vma->node) {
+		nouveau_vm_unmap(vma);
+		nouveau_vm_put(vma);
+	}
+}
+
+/* the below is basically only here to support sharing the paged dma object
+ * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
+ * anywhere else.
+ */
+
+static void
+nouveau_gpudup_dtor(struct nouveau_object *object)
+{
+	struct nouveau_gpuobj *gpuobj = (void *)object;
+	nouveau_object_ref(NULL, &gpuobj->parent);
+	nouveau_object_destroy(&gpuobj->base);
+}
+
+static struct nouveau_oclass
+nouveau_gpudup_oclass = {
+	.handle = NV_GPUOBJ_CLASS,
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.dtor = nouveau_gpudup_dtor,
+		.init = nouveau_object_init,
+		.fini = nouveau_object_fini,
+	},
+};
+
+int
+nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
+		   struct nouveau_gpuobj **pgpuobj)
+{
+	struct nouveau_gpuobj *gpuobj;
+	int ret;
+
+	ret = nouveau_object_create(parent, parent->engine,
+				   &nouveau_gpudup_oclass, 0, &gpuobj);
+	*pgpuobj = gpuobj;
+	if (ret)
+		return ret;
+
+	nouveau_object_ref(nv_object(base), &gpuobj->parent);
+	gpuobj->addr = base->addr;
+	gpuobj->size = base->size;
+	return 0;
+}
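
nouveau_gpuobj_new() covers both allocation styles handled by the constructor above: pass a parent gpuobj that owns a heap to suballocate from it, or pass NULL to get a fresh instance-memory backing. A hedged usage sketch (the surrounding object ownership and flag choice are illustrative; teardown omitted for brevity):

static int example_alloc(struct nouveau_object *parent)
{
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	/* NULL pargpu: back the object with fresh, zeroed instance memory */
	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100,
				 NVOBJ_FLAG_ZERO_ALLOC, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, 0x00000000);	/* accessed via the rd32/wr32 ofuncs */
	return 0;
}
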
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
new file mode 100644
index 000000000000..b8d2cbf8a7a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/handle.h>
+#include <core/client.h>
+
+#define hprintk(h,l,f,a...) do {                                               \
+	struct nouveau_client *c = nouveau_client((h)->object);                \
+	struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0;      \
+	nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a);               \
+} while(0)
+
+int
+nouveau_handle_init(struct nouveau_handle *handle)
+{
+	struct nouveau_handle *item;
+	int ret;
+
+	hprintk(handle, TRACE, "init running\n");
+	ret = nouveau_object_inc(handle->object);
+	if (ret)
+		return ret;
+
+	hprintk(handle, TRACE, "init children\n");
+	list_for_each_entry(item, &handle->tree, head) {
+		ret = nouveau_handle_init(item);
+		if (ret)
+			goto fail;
+	}
+
+	hprintk(handle, TRACE, "init completed\n");
+	return 0;
+fail:
+	hprintk(handle, ERROR, "init failed with %d\n", ret);
+	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
+		nouveau_handle_fini(item, false);
+	}
+
+	nouveau_object_dec(handle->object, false);
+	return ret;
+}
+
+int
+nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
+{
+	static char *name[2] = { "fini", "suspend" };
+	struct nouveau_handle *item;
+	int ret;
+
+	hprintk(handle, TRACE, "%s children\n", name[suspend]);
+	list_for_each_entry(item, &handle->tree, head) {
+		ret = nouveau_handle_fini(item, suspend);
+		if (ret && suspend)
+			goto fail;
+	}
+
+	hprintk(handle, TRACE, "%s running\n", name[suspend]);
+	if (handle->object) {
+		ret = nouveau_object_dec(handle->object, suspend);
+		if (ret && suspend)
+			goto fail;
+	}
+
+	hprintk(handle, TRACE, "%s completed\n", name[suspend]);
+	return 0;
+fail:
+	hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
+	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
+		int rret = nouveau_handle_init(item);
+		if (rret)
+			hprintk(handle, FATAL, "failed to restart, %d\n", rret);
+	}
+
+	return ret;
+}
+
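+/* register 'object' under '_handle' in the closest NAMEDB ancestor of
+ * 'parent', give the parent a chance to attach it (eg. a RAMHT entry for
+ * fifo channels, with the returned cookie kept in handle->priv), and link
+ * the new handle into its parent handle's tree so init/fini can recurse.
+ */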
+int
+nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
+		      struct nouveau_object *object,
+		      struct nouveau_handle **phandle)
+{
+	struct nouveau_object *namedb;
+	struct nouveau_handle *handle;
+	int ret;
+
+	namedb = parent;
+	while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
+		namedb = namedb->parent;
+
+	handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&handle->head);
+	INIT_LIST_HEAD(&handle->tree);
+	handle->name = _handle;
+	handle->priv = ~0;
+
+	ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle);
+	if (ret) {
+		kfree(handle);
+		return ret;
+	}
+
+	if (nv_parent(parent)->object_attach) {
+		ret = nv_parent(parent)->object_attach(parent, object, _handle);
+		if (ret < 0) {
+			nouveau_handle_destroy(handle);
+			return ret;
+		}
+
+		handle->priv = ret;
+	}
+
+	if (object != namedb) {
+		while (!nv_iclass(namedb, NV_CLIENT_CLASS))
+			namedb = namedb->parent;
+
+		handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent);
+		if (handle->parent) {
+			list_add(&handle->head, &handle->parent->tree);
+			nouveau_namedb_put(handle->parent);
+		}
+	}
+
+	hprintk(handle, TRACE, "created\n");
+	return 0;
+}
+
+void
+nouveau_handle_destroy(struct nouveau_handle *handle)
+{
+	struct nouveau_handle *item, *temp;
+
+	hprintk(handle, TRACE, "destroy running\n");
+	list_for_each_entry_safe(item, temp, &handle->tree, head) {
+		nouveau_handle_destroy(item);
+	}
+	list_del(&handle->head);
+
+	if (handle->priv != ~0) {
+		struct nouveau_object *parent = handle->parent->object;
+		nv_parent(parent)->object_detach(parent, handle->priv);
+	}
+
+	hprintk(handle, TRACE, "destroy completed\n");
+	nouveau_namedb_remove(handle);
+	kfree(handle);
+}
+
+struct nouveau_object *
+nouveau_handle_ref(struct nouveau_object *parent, u32 name)
+{
+	struct nouveau_object *object = NULL;
+	struct nouveau_handle *handle;
+
+	while (!nv_iclass(parent, NV_NAMEDB_CLASS))
+		parent = parent->parent;
+
+	handle = nouveau_namedb_get(nv_namedb(parent), name);
+	if (handle) {
+		nouveau_object_ref(handle->object, &object);
+		nouveau_namedb_put(handle);
+	}
+
+	return object;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass)
+{
+	struct nouveau_namedb *namedb;
+	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+		return nouveau_namedb_get_class(namedb, oclass);
+	return NULL;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst)
+{
+	struct nouveau_namedb *namedb;
+	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+		return nouveau_namedb_get_vinst(namedb, vinst);
+	return NULL;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst)
+{
+	struct nouveau_namedb *namedb;
+	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+		return nouveau_namedb_get_cinst(namedb, cinst);
+	return NULL;
+}
+
+void
+nouveau_handle_put(struct nouveau_handle *handle)
+{
+	if (handle)
+		nouveau_namedb_put(handle);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index 3e98806dd76f..bfddf87926dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,20 +22,52 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
+#include "core/os.h"
+#include "core/mm.h"
 
-static inline void
-region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
+	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+
+void
+nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
 {
-	list_del(&a->nl_entry);
-	list_del(&a->fl_entry);
-	kfree(a);
+	struct nouveau_mm_node *this = *pthis;
+
+	if (this) {
+		struct nouveau_mm_node *prev = node(this, prev);
+		struct nouveau_mm_node *next = node(this, next);
+
+		if (prev && prev->type == 0) {
+			prev->length += this->length;
+			list_del(&this->nl_entry);
+			kfree(this); this = prev;
+		}
+
+		if (next && next->type == 0) {
+			next->offset  = this->offset;
+			next->length += this->length;
+			if (this->type == 0)
+				list_del(&this->fl_entry);
+			list_del(&this->nl_entry);
+			kfree(this); this = NULL;
+		}
+
+		if (this && this->type != 0) {
+			list_for_each_entry(prev, &mm->free, fl_entry) {
+				if (this->offset < prev->offset)
+					break;
+			}
+
+			list_add_tail(&this->fl_entry, &prev->fl_entry);
+			this->type = 0;
+		}
+	}
+
+	*pthis = NULL;
 }
 
 static struct nouveau_mm_node *
-region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 {
 	struct nouveau_mm_node *b;
 
@@ -57,38 +89,12 @@ region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 	return b;
 }
 
-#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
-	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
-
-void
-nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
-{
-	struct nouveau_mm_node *prev = node(this, prev);
-	struct nouveau_mm_node *next = node(this, next);
-
-	list_add(&this->fl_entry, &mm->free);
-	this->type = 0;
-
-	if (prev && prev->type == 0) {
-		prev->length += this->length;
-		region_put(mm, this);
-		this = prev;
-	}
-
-	if (next && next->type == 0) {
-		next->offset  = this->offset;
-		next->length += this->length;
-		region_put(mm, this);
-	}
-}
-
 int
-nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
-	       u32 align, struct nouveau_mm_node **pnode)
+nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+		u32 align, struct nouveau_mm_node **pnode)
 {
 	struct nouveau_mm_node *prev, *this, *next;
-	u32 min = size_nc ? size_nc : size;
-	u32 align_mask = align - 1;
+	u32 mask = align - 1;
 	u32 splitoff;
 	u32 s, e;
 
@@ -104,16 +110,86 @@ nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
 		if (next && next->type != type)
 			e = rounddown(e, mm->block_size);
 
-		s  = (s + align_mask) & ~align_mask;
-		e &= ~align_mask;
-		if (s > e || e - s < min)
+		s  = (s + mask) & ~mask;
+		e &= ~mask;
+		if (s > e || e - s < size_min)
 			continue;
 
 		splitoff = s - this->offset;
-		if (splitoff && !region_split(mm, this, splitoff))
+		if (splitoff && !region_head(mm, this, splitoff))
+			return -ENOMEM;
+
+		this = region_head(mm, this, min(size_max, e - s));
+		if (!this)
+			return -ENOMEM;
+
+		this->type = type;
+		list_del(&this->fl_entry);
+		*pnode = this;
+		return 0;
+	}
+
+	return -ENOSPC;
+}
+
+static struct nouveau_mm_node *
+region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+{
+	struct nouveau_mm_node *b;
+
+	if (a->length == size)
+		return a;
+
+	b = kmalloc(sizeof(*b), GFP_KERNEL);
+	if (unlikely(b == NULL))
+		return NULL;
+
+	a->length -= size;
+	b->offset  = a->offset + a->length;
+	b->length  = size;
+	b->type    = a->type;
+
+	list_add(&b->nl_entry, &a->nl_entry);
+	if (b->type == 0)
+		list_add(&b->fl_entry, &a->fl_entry);
+	return b;
+}
+
+int
+nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+		u32 align, struct nouveau_mm_node **pnode)
+{
+	struct nouveau_mm_node *prev, *this, *next;
+	u32 mask = align - 1;
+
+	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
+		u32 e = this->offset + this->length;
+		u32 s = this->offset;
+		u32 c = 0, a;
+
+		prev = node(this, prev);
+		if (prev && prev->type != type)
+			s = roundup(s, mm->block_size);
+
+		next = node(this, next);
+		if (next && next->type != type) {
+			e = rounddown(e, mm->block_size);
+			c = next->offset - e;
+		}
+
+		s = (s + mask) & ~mask;
+		a = e - s;
+		if (s > e || a < size_min)
+			continue;
+
+		a  = min(a, size_max);
+		s  = (e - a) & ~mask;
+		c += (e - s) - a;
+
+		if (c && !region_tail(mm, this, c))
 			return -ENOMEM;
 
-		this = region_split(mm, this, min(size, e - s));
+		this = region_tail(mm, this, a);
 		if (!this)
 			return -ENOMEM;
 
@@ -148,6 +224,7 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 	list_add_tail(&node->nl_entry, &mm->nodes);
 	list_add_tail(&node->fl_entry, &mm->free);
 	mm->heap_nodes++;
+	mm->heap_size += length;
 	return 0;
 }
 
@@ -159,15 +236,8 @@ nouveau_mm_fini(struct nouveau_mm *mm)
 	int nodes = 0;
 
 	list_for_each_entry(node, &mm->nodes, nl_entry) {
-		if (nodes++ == mm->heap_nodes) {
-			printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
-			list_for_each_entry(node, &mm->nodes, nl_entry) {
-				printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
-				       node->type, node->offset, node->length);
-			}
-			WARN_ON(1);
+		if (nodes++ == mm->heap_nodes)
 			return -EBUSY;
-		}
 	}
 
 	kfree(heap);
diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/core/core/namedb.c
new file mode 100644
index 000000000000..1ce95a8709df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/namedb.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+
+static struct nouveau_handle *
+nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name)
+{
+	struct nouveau_handle *handle;
+
+	list_for_each_entry(handle, &namedb->list, node) {
+		if (handle->name == name)
+			return handle;
+	}
+
+	return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass)
+{
+	struct nouveau_handle *handle;
+
+	list_for_each_entry(handle, &namedb->list, node) {
+		if (nv_mclass(handle->object) == oclass)
+			return handle;
+	}
+
+	return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst)
+{
+	struct nouveau_handle *handle;
+
+	list_for_each_entry(handle, &namedb->list, node) {
+		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+			if (nv_gpuobj(handle->object)->addr == vinst)
+				return handle;
+		}
+	}
+
+	return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst)
+{
+	struct nouveau_handle *handle;
+
+	list_for_each_entry(handle, &namedb->list, node) {
+		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+			if (nv_gpuobj(handle->object)->node &&
+			    nv_gpuobj(handle->object)->node->offset == cinst)
+				return handle;
+		}
+	}
+
+	return NULL;
+}
+
+int
+nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name,
+		      struct nouveau_object *object,
+		      struct nouveau_handle *handle)
+{
+	int ret = -EEXIST;
+	write_lock_irq(&namedb->lock);
+	if (!nouveau_namedb_lookup(namedb, name)) {
+		nouveau_object_ref(object, &handle->object);
+		handle->namedb = namedb;
+		list_add(&handle->node, &namedb->list);
+		ret = 0;
+	}
+	write_unlock_irq(&namedb->lock);
+	return ret;
+}
+
+void
+nouveau_namedb_remove(struct nouveau_handle *handle)
+{
+	struct nouveau_namedb *namedb = handle->namedb;
+	struct nouveau_object *object = handle->object;
+	write_lock_irq(&namedb->lock);
+	list_del(&handle->node);
+	write_unlock_irq(&namedb->lock);
+	nouveau_object_ref(NULL, &object);
+}
+
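+/* the _get() helpers below return with the namedb read lock held, so the
+ * handle can't be removed while the caller is using it; nouveau_namedb_put()
+ * drops the lock again (it's released immediately when the lookup fails).
+ */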
+struct nouveau_handle *
+nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name)
+{
+	struct nouveau_handle *handle;
+	read_lock(&namedb->lock);
+	handle = nouveau_namedb_lookup(namedb, name);
+	if (handle == NULL)
+		read_unlock(&namedb->lock);
+	return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass)
+{
+	struct nouveau_handle *handle;
+	read_lock(&namedb->lock);
+	handle = nouveau_namedb_lookup_class(namedb, oclass);
+	if (handle == NULL)
+		read_unlock(&namedb->lock);
+	return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst)
+{
+	struct nouveau_handle *handle;
+	read_lock(&namedb->lock);
+	handle = nouveau_namedb_lookup_vinst(namedb, vinst);
+	if (handle == NULL)
+		read_unlock(&namedb->lock);
+	return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst)
+{
+	struct nouveau_handle *handle;
+	read_lock(&namedb->lock);
+	handle = nouveau_namedb_lookup_cinst(namedb, cinst);
+	if (handle == NULL)
+		read_unlock(&namedb->lock);
+	return handle;
+}
+
+void
+nouveau_namedb_put(struct nouveau_handle *handle)
+{
+	if (handle)
+		read_unlock(&handle->namedb->lock);
+}
+
+int
+nouveau_namedb_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       struct nouveau_oclass *sclass, u32 engcls,
+		       int length, void **pobject)
+{
+	struct nouveau_namedb *namedb;
+	int ret;
+
+	ret = nouveau_parent_create_(parent, engine, oclass, pclass |
+				     NV_NAMEDB_CLASS, sclass, engcls,
+				     length, pobject);
+	namedb = *pobject;
+	if (ret)
+		return ret;
+
+	rwlock_init(&namedb->lock);
+	INIT_LIST_HEAD(&namedb->list);
+	return 0;
+}
+
+int
+_nouveau_namedb_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_namedb *object;
+	int ret;
+
+	ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
+	*pobject = nv_object(object);
+	if (ret)
+		return ret;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
new file mode 100644
index 000000000000..0daab62ea14c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/engine.h>
+
+#ifdef NOUVEAU_OBJECT_MAGIC
+static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
+static DEFINE_SPINLOCK(_objlist_lock);
+#endif
+
+int
+nouveau_object_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       int size, void **pobject)
+{
+	struct nouveau_object *object;
+
+	object = *pobject = kzalloc(size, GFP_KERNEL);
+	if (!object)
+		return -ENOMEM;
+
+	nouveau_object_ref(parent, &object->parent);
+	nouveau_object_ref(engine, &object->engine);
+	object->oclass = oclass;
+	object->oclass->handle |= pclass;
+	atomic_set(&object->refcount, 1);
+	atomic_set(&object->usecount, 0);
+
+#ifdef NOUVEAU_OBJECT_MAGIC
+	object->_magic = NOUVEAU_OBJECT_MAGIC;
+	spin_lock(&_objlist_lock);
+	list_add(&object->list, &_objlist);
+	spin_unlock(&_objlist_lock);
+#endif
+	return 0;
+}
+
+static int
+_nouveau_object_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_object *object;
+	int ret;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &object);
+	*pobject = nv_object(object);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+nouveau_object_destroy(struct nouveau_object *object)
+{
+#ifdef NOUVEAU_OBJECT_MAGIC
+	spin_lock(&_objlist_lock);
+	list_del(&object->list);
+	spin_unlock(&_objlist_lock);
+#endif
+	nouveau_object_ref(NULL, &object->engine);
+	nouveau_object_ref(NULL, &object->parent);
+	kfree(object);
+}
+
+static void
+_nouveau_object_dtor(struct nouveau_object *object)
+{
+	nouveau_object_destroy(object);
+}
+
+int
+nouveau_object_init(struct nouveau_object *object)
+{
+	return 0;
+}
+
+static int
+_nouveau_object_init(struct nouveau_object *object)
+{
+	return nouveau_object_init(object);
+}
+
+int
+nouveau_object_fini(struct nouveau_object *object, bool suspend)
+{
+	return 0;
+}
+
+static int
+_nouveau_object_fini(struct nouveau_object *object, bool suspend)
+{
+	return nouveau_object_fini(object, suspend);
+}
+
+struct nouveau_ofuncs
+nouveau_object_ofuncs = {
+	.ctor = _nouveau_object_ctor,
+	.dtor = _nouveau_object_dtor,
+	.init = _nouveau_object_init,
+	.fini = _nouveau_object_fini,
+};
+
+int
+nouveau_object_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
+	int ret;
+
+	*pobject = NULL;
+
+	ret = ofuncs->ctor(parent, engine, oclass, data, size, pobject);
+	if (ret < 0) {
+		if (ret != -ENODEV) {
+			nv_error(parent, "failed to create 0x%08x, %d\n",
+				 oclass->handle, ret);
+		}
+
+		if (*pobject) {
+			ofuncs->dtor(*pobject);
+			*pobject = NULL;
+		}
+
+		return ret;
+	}
+
+	nv_debug(*pobject, "created\n");
+	return 0;
+}
+
+static void
+nouveau_object_dtor(struct nouveau_object *object)
+{
+	nv_debug(object, "destroying\n");
+	nv_ofuncs(object)->dtor(object);
+}
+
+void
+nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
+{
+	if (obj) {
+		atomic_inc(&obj->refcount);
+		nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
+	}
+
+	if (*ref) {
+		int dead = atomic_dec_and_test(&(*ref)->refcount);
+		nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
+		if (dead)
+			nouveau_object_dtor(*ref);
+	}
+
+	*ref = obj;
+}
+
+int
+nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
+		   u16 _oclass, void *data, u32 size,
+		   struct nouveau_object **pobject)
+{
+	struct nouveau_object *parent = NULL;
+	struct nouveau_object *engctx = NULL;
+	struct nouveau_object *object = NULL;
+	struct nouveau_object *engine;
+	struct nouveau_oclass *oclass;
+	struct nouveau_handle *handle;
+	int ret;
+
+	/* lookup parent object and ensure it *is* a parent */
+	parent = nouveau_handle_ref(client, _parent);
+	if (!parent) {
+		nv_error(client, "parent 0x%08x not found\n", _parent);
+		return -ENOENT;
+	}
+
+	if (!nv_iclass(parent, NV_PARENT_CLASS)) {
+		nv_error(parent, "cannot have children\n");
+		ret = -EINVAL;
+		goto fail_class;
+	}
+
+	/* check that parent supports the requested subclass */
+	ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
+	if (ret) {
+		nv_debug(parent, "illegal class 0x%04x\n", _oclass);
+		goto fail_class;
+	}
+
+	/* make sure engine init has been completed *before* any objects
+	 * it controls are created - the constructors may depend on
+	 * state calculated at init (ie. default context construction)
+	 */
+	if (engine) {
+		ret = nouveau_object_inc(engine);
+		if (ret)
+			goto fail_class;
+	}
+
+	/* if engine requires it, create a context object to insert
+	 * between the parent and its children (eg. PGRAPH context)
+	 */
+	if (engine && nv_engine(engine)->cclass) {
+		ret = nouveau_object_ctor(parent, engine,
+					  nv_engine(engine)->cclass,
+					  data, size, &engctx);
+		if (ret)
+			goto fail_engctx;
+	} else {
+		nouveau_object_ref(parent, &engctx);
+	}
+
+	/* finally, create new object and bind it to its handle */
+	ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+	*pobject = object;
+	if (ret)
+		goto fail_ctor;
+
+	ret = nouveau_object_inc(object);
+	if (ret)
+		goto fail_init;
+
+	ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
+	if (ret)
+		goto fail_handle;
+
+	ret = nouveau_handle_init(handle);
+	if (ret)
+		nouveau_handle_destroy(handle);
+
+fail_handle:
+	nouveau_object_dec(object, false);
+fail_init:
+	nouveau_object_ref(NULL, &object);
+fail_ctor:
+	nouveau_object_ref(NULL, &engctx);
+fail_engctx:
+	if (engine)
+		nouveau_object_dec(engine, false);
+fail_class:
+	nouveau_object_ref(NULL, &parent);
+	return ret;
+}
+
+int
+nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
+{
+	struct nouveau_object *parent = NULL;
+	struct nouveau_object *namedb = NULL;
+	struct nouveau_handle *handle = NULL;
+	int ret = -EINVAL;
+
+	parent = nouveau_handle_ref(client, _parent);
+	if (!parent)
+		return -ENOENT;
+
+	namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
+	if (namedb) {
+		handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
+		if (handle) {
+			nouveau_namedb_put(handle);
+			nouveau_handle_fini(handle, false);
+			nouveau_handle_destroy(handle);
+			ret = 0;
+		}
+	}
+
+	nouveau_object_ref(NULL, &parent);
+	return ret;
+}
+
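+/* usecount tracks active users of an object: the first nouveau_object_inc()
+ * brings up the parent, the engine and then the object itself, and the last
+ * nouveau_object_dec() tears them down again (or suspends them when
+ * 'suspend' is set), unwinding as necessary on failure.
+ */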
+int
+nouveau_object_inc(struct nouveau_object *object)
+{
+	int ref = atomic_add_return(1, &object->usecount);
+	int ret;
+
+	nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
+	if (ref != 1)
+		return 0;
+
+	nv_trace(object, "initialising...\n");
+	if (object->parent) {
+		ret = nouveau_object_inc(object->parent);
+		if (ret) {
+			nv_error(object, "parent failed, %d\n", ret);
+			goto fail_parent;
+		}
+	}
+
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		ret = nouveau_object_inc(object->engine);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+		if (ret) {
+			nv_error(object, "engine failed, %d\n", ret);
+			goto fail_engine;
+		}
+	}
+
+	ret = nv_ofuncs(object)->init(object);
+	if (ret) {
+		nv_error(object, "init failed, %d\n", ret);
+		goto fail_self;
+	}
+
+	nv_debug(object, "initialised\n");
+	return 0;
+
+fail_self:
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		nouveau_object_dec(object->engine, false);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+	}
+fail_engine:
+	if (object->parent)
+		nouveau_object_dec(object->parent, false);
+fail_parent:
+	atomic_dec(&object->usecount);
+	return ret;
+}
+
+static int
+nouveau_object_decf(struct nouveau_object *object)
+{
+	int ret;
+
+	nv_trace(object, "stopping...\n");
+
+	ret = nv_ofuncs(object)->fini(object, false);
+	if (ret)
+		nv_warn(object, "failed fini, %d\n", ret);
+
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		nouveau_object_dec(object->engine, false);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+	}
+
+	if (object->parent)
+		nouveau_object_dec(object->parent, false);
+
+	nv_debug(object, "stopped\n");
+	return 0;
+}
+
+static int
+nouveau_object_decs(struct nouveau_object *object)
+{
+	int ret, rret;
+
+	nv_trace(object, "suspending...\n");
+
+	ret = nv_ofuncs(object)->fini(object, true);
+	if (ret) {
+		nv_error(object, "failed suspend, %d\n", ret);
+		return ret;
+	}
+
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		ret = nouveau_object_dec(object->engine, true);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+		if (ret) {
+			nv_warn(object, "engine failed suspend, %d\n", ret);
+			goto fail_engine;
+		}
+	}
+
+	if (object->parent) {
+		ret = nouveau_object_dec(object->parent, true);
+		if (ret) {
+			nv_warn(object, "parent failed suspend, %d\n", ret);
+			goto fail_parent;
+		}
+	}
+
+	nv_debug(object, "suspended\n");
+	return 0;
+
+fail_parent:
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		rret = nouveau_object_inc(object->engine);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+		if (rret)
+			nv_fatal(object, "engine failed to reinit, %d\n", rret);
+	}
+
+fail_engine:
+	rret = nv_ofuncs(object)->init(object);
+	if (rret)
+		nv_fatal(object, "failed to reinit, %d\n", rret);
+
+	return ret;
+}
+
+int
+nouveau_object_dec(struct nouveau_object *object, bool suspend)
+{
+	int ref = atomic_add_return(-1, &object->usecount);
+	int ret;
+
+	nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
+
+	if (ref == 0) {
+		if (suspend)
+			ret = nouveau_object_decs(object);
+		else
+			ret = nouveau_object_decf(object);
+
+		if (ret) {
+			atomic_inc(&object->usecount);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+nouveau_object_debug(void)
+{
+#ifdef NOUVEAU_OBJECT_MAGIC
+	struct nouveau_object *object;
+	if (!list_empty(&_objlist)) {
+		nv_fatal(NULL, "*******************************************\n");
+		nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
+		nv_fatal(NULL, "*******************************************\n");
+		list_for_each_entry(object, &_objlist, list) {
+			nv_fatal(object, "%p/%p/%d/%d\n",
+				 object->parent, object->engine,
+				 atomic_read(&object->refcount),
+				 atomic_read(&object->usecount));
+		}
+	}
+#endif
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
new file mode 100644
index 000000000000..62a432ea39e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+#include <core/debug.h>
+
+/* compares unterminated string 'str' of length 'len' with the
+ * zero-terminated string 'cmp'; returns 0 only on a case-insensitive match
+ */
+static inline int
+strncasecmpz(const char *str, const char *cmp, size_t len)
+{
+	if (strlen(cmp) != len)
+		return len;
+	return strncasecmp(str, cmp, len);
+}
+
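+/* find option 'opt' in a comma-separated "name=value" option string, eg.
+ * nouveau_stropt("NvBios=PRAMIN,PGRAPH=trace", "NvBios", &len) points at
+ * "PRAMIN..." with len == 6; returns NULL if the option isn't present or
+ * has an empty value.
+ */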
+const char *
+nouveau_stropt(const char *optstr, const char *opt, int *arglen)
+{
+	while (optstr && *optstr != '\0') {
+		int len = strcspn(optstr, ",=");
+		switch (optstr[len]) {
+		case '=':
+			if (!strncasecmpz(optstr, opt, len)) {
+				optstr += len + 1;
+				*arglen = strcspn(optstr, ",=");
+				return *arglen ? optstr : NULL;
+			}
+			optstr++;
+			break;
+		case ',':
+			optstr++;
+			break;
+		default:
+			break;
+		}
+		optstr += len;
+	}
+
+	return NULL;
+}
+
+bool
+nouveau_boolopt(const char *optstr, const char *opt, bool value)
+{
+	int arglen;
+
+	optstr = nouveau_stropt(optstr, opt, &arglen);
+	if (optstr) {
+		if (!strncasecmpz(optstr, "0", arglen) ||
+		    !strncasecmpz(optstr, "no", arglen) ||
+		    !strncasecmpz(optstr, "off", arglen) ||
+		    !strncasecmpz(optstr, "false", arglen))
+			value = false;
+		else
+		if (!strncasecmpz(optstr, "1", arglen) ||
+		    !strncasecmpz(optstr, "yes", arglen) ||
+		    !strncasecmpz(optstr, "on", arglen) ||
+		    !strncasecmpz(optstr, "true", arglen))
+			value = true;
+	}
+
+	return value;
+}
+
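+/* resolve the debug level for subdev 'sub', eg. with "info,PGRAPH=trace"
+ * every subdev gets NV_DBG_INFO and PGRAPH gets NV_DBG_TRACE; bare level
+ * names apply to everything, "name=level" only to the named subdev, and
+ * anything unspecified falls back to CONFIG_NOUVEAU_DEBUG_DEFAULT.
+ */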
+int
+nouveau_dbgopt(const char *optstr, const char *sub)
+{
+	int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT;
+
+	while (optstr) {
+		int len = strcspn(optstr, ",=");
+		switch (optstr[len]) {
+		case '=':
+			if (strncasecmpz(optstr, sub, len))
+				mode = 0;
+			optstr++;
+			break;
+		default:
+			if (mode) {
+				if (!strncasecmpz(optstr, "fatal", len))
+					level = NV_DBG_FATAL;
+				else if (!strncasecmpz(optstr, "error", len))
+					level = NV_DBG_ERROR;
+				else if (!strncasecmpz(optstr, "warn", len))
+					level = NV_DBG_WARN;
+				else if (!strncasecmpz(optstr, "info", len))
+					level = NV_DBG_INFO;
+				else if (!strncasecmpz(optstr, "debug", len))
+					level = NV_DBG_DEBUG;
+				else if (!strncasecmpz(optstr, "trace", len))
+					level = NV_DBG_TRACE;
+				else if (!strncasecmpz(optstr, "paranoia", len))
+					level = NV_DBG_PARANOIA;
+				else if (!strncasecmpz(optstr, "spam", len))
+					level = NV_DBG_SPAM;
+			}
+
+			if (optstr[len] != '\0') {
+				optstr++;
+				mode = 1;
+				break;
+			}
+
+			return level;
+		}
+		optstr += len;
+	}
+
+	return level;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
new file mode 100644
index 000000000000..a1ea034611d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+
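+/* resolve which engine and object class implement class 'handle': check the
+ * parent's own subclass list first, then the classes exposed by each engine
+ * enabled in the parent's engine mask.
+ */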
+int
+nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
+		      struct nouveau_object **pengine,
+		      struct nouveau_oclass **poclass)
+{
+	struct nouveau_sclass *sclass;
+	struct nouveau_engine *engine;
+	struct nouveau_oclass *oclass;
+	u64 mask;
+
+	sclass = nv_parent(parent)->sclass;
+	while (sclass) {
+		if ((sclass->oclass->handle & 0xffff) == handle) {
+			*pengine = parent->engine;
+			*poclass = sclass->oclass;
+			return 0;
+		}
+
+		sclass = sclass->sclass;
+	}
+
+	mask = nv_parent(parent)->engine;
+	while (mask) {
+		int i = ffsll(mask) - 1;
+
+		if ((engine = nouveau_engine(parent, i))) {
+			oclass = engine->sclass;
+			while (oclass->ofuncs) {
+				if ((oclass->handle & 0xffff) == handle) {
+					*pengine = nv_object(engine);
+					*poclass = oclass;
+					return 0;
+				}
+				oclass++;
+			}
+		}
+
+		mask &= ~(1ULL << i);
+	}
+
+	return -EINVAL;
+}
+
+int
+nouveau_parent_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       struct nouveau_oclass *sclass, u64 engcls,
+		       int size, void **pobject)
+{
+	struct nouveau_parent *object;
+	struct nouveau_sclass *nclass;
+	int ret;
+
+	ret = nouveau_object_create_(parent, engine, oclass, pclass |
+				     NV_PARENT_CLASS, size, pobject);
+	object = *pobject;
+	if (ret)
+		return ret;
+
+	while (sclass && sclass->ofuncs) {
+		nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
+		if (!nclass)
+			return -ENOMEM;
+
+		nclass->sclass = object->sclass;
+		object->sclass = nclass;
+		nclass->engine = engine ? nv_engine(engine) : NULL;
+		nclass->oclass = sclass;
+		sclass++;
+	}
+
+	object->engine = engcls;
+	return 0;
+}
+
+int
+_nouveau_parent_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_parent *object;
+	int ret;
+
+	ret = nouveau_parent_create(parent, engine, oclass, 0, NULL, 0, &object);
+	*pobject = nv_object(object);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+nouveau_parent_destroy(struct nouveau_parent *parent)
+{
+	struct nouveau_sclass *sclass;
+
+	while ((sclass = parent->sclass)) {
+		parent->sclass = sclass->sclass;
+		kfree(sclass);
+	}
+
+	nouveau_object_destroy(&parent->base);
+}
+
+
+_nouveau_parent_dtor(struct nouveau_object *object)
+{
+	nouveau_parent_destroy(nv_parent(object));
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
new file mode 100644
index 000000000000..6161eaf5447c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/subdev.h>
+#include <core/printk.h>
+
+void
+nv_printk_(struct nouveau_object *object, const char *pfx, int level,
+	   const char *fmt, ...)
+{
+	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
+	char mfmt[256];
+	va_list args;
+
+	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
+		struct nouveau_object *device = object;
+		struct nouveau_object *subdev = object;
+		char obuf[64], *ofmt = "";
+
+		if (object->engine) {
+			snprintf(obuf, sizeof(obuf), "[0x%08x][%p]",
+				 nv_hclass(object), object);
+			ofmt = obuf;
+			subdev = object->engine;
+			device = object->engine;
+		}
+
+		if (subdev->parent)
+			device = subdev->parent;
+
+		if (level > nv_subdev(subdev)->debug)
+			return;
+
+		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
+			 name[level], nv_subdev(subdev)->name,
+			 nv_device(device)->name, ofmt, fmt);
+	} else
+	if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
+		if (level > nv_client(object)->debug)
+			return;
+
+		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
+			 name[level], nv_client(object)->name, fmt);
+	} else {
+		snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
+	}
+
+	va_start(args, fmt);
+	vprintk(mfmt, args);
+	va_end(args);
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
new file mode 100644
index 000000000000..86a64045dd60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/object.h>
+#include <core/ramht.h>
+#include <core/math.h>
+
+#include <subdev/bar.h>
+
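+/* fold the object handle down to ramht->bits bits by repeated XOR, mix in
+ * the channel id, then shift left by 3: each hash table entry is two 32-bit
+ * words (handle, context).
+ */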
+static u32
+nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
+{
+	u32 hash = 0;
+
+	while (handle) {
+		hash ^= (handle & ((1 << ramht->bits) - 1));
+		handle >>= ramht->bits;
+	}
+
+	hash ^= chid << (ramht->bits - 4);
+	hash  = hash << 3;
+	return hash;
+}
+
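+/* linearly probe from the hashed slot for a free entry (context word zero),
+ * wrapping around at the end of the table; returns the byte offset used as
+ * the removal cookie, or -ENOMEM if the hash table is full.
+ */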
+int
+nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
+		     u32 handle, u32 context)
+{
+	struct nouveau_bar *bar = nouveau_bar(ramht);
+	u32 co, ho;
+
+	co = ho = nouveau_ramht_hash(ramht, chid, handle);
+	do {
+		if (!nv_ro32(ramht, co + 4)) {
+			nv_wo32(ramht, co + 0, handle);
+			nv_wo32(ramht, co + 4, context);
+			if (bar)
+				bar->flush(bar);
+			return co;
+		}
+
+		co += 8;
+		if (co >= nv_gpuobj(ramht)->size)
+			co = 0;
+	} while (co != ho);
+
+	return -ENOMEM;
+}
+
+void
+nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
+{
+	struct nouveau_bar *bar = nouveau_bar(ramht);
+	nv_wo32(ramht, cookie + 0, 0x00000000);
+	nv_wo32(ramht, cookie + 4, 0x00000000);
+	if (bar)
+		bar->flush(bar);
+}
+
+static struct nouveau_oclass
+nouveau_ramht_oclass = {
+	.handle = 0x0000abcd,
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = NULL,
+		.dtor = _nouveau_gpuobj_dtor,
+		.init = _nouveau_gpuobj_init,
+		.fini = _nouveau_gpuobj_fini,
+		.rd32 = _nouveau_gpuobj_rd32,
+		.wr32 = _nouveau_gpuobj_wr32,
+	},
+};
+
+int
+nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+		  u32 size, u32 align, struct nouveau_ramht **pramht)
+{
+	struct nouveau_ramht *ramht;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, parent->engine ?
+				    parent->engine : parent, /* <nv50 ramht */
+				    &nouveau_ramht_oclass, 0, pargpu, size,
+				    align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+	*pramht = ramht;
+	if (ret)
+		return ret;
+
+	ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
new file mode 100644
index 000000000000..f74c30aa33a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/option.h>
+
+void
+nouveau_subdev_reset(struct nouveau_object *subdev)
+{
+	nv_trace(subdev, "resetting...\n");
+	nv_ofuncs(subdev)->fini(subdev, false);
+	nv_debug(subdev, "reset\n");
+}
+
+int
+nouveau_subdev_init(struct nouveau_subdev *subdev)
+{
+	int ret = nouveau_object_init(&subdev->base);
+	if (ret)
+		return ret;
+
+	nouveau_subdev_reset(&subdev->base);
+	return 0;
+}
+
+int
+_nouveau_subdev_init(struct nouveau_object *object)
+{
+	return nouveau_subdev_init(nv_subdev(object));
+}
+
+int
+nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend)
+{
+	if (subdev->unit) {
+		nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
+		nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
+	}
+
+	return nouveau_object_fini(&subdev->base, suspend);
+}
+
+int
+_nouveau_subdev_fini(struct nouveau_object *object, bool suspend)
+{
+	return nouveau_subdev_fini(nv_subdev(object), suspend);
+}
+
+void
+nouveau_subdev_destroy(struct nouveau_subdev *subdev)
+{
+	int subidx = nv_hclass(subdev) & 0xff;
+	nv_device(subdev)->subdev[subidx] = NULL;
+	nouveau_object_destroy(&subdev->base);
+}
+
+void
+_nouveau_subdev_dtor(struct nouveau_object *object)
+{
+	nouveau_subdev_destroy(nv_subdev(object));
+}
+
+int
+nouveau_subdev_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       const char *subname, const char *sysname,
+		       int size, void **pobject)
+{
+	struct nouveau_subdev *subdev;
+	int ret;
+
+	ret = nouveau_object_create_(parent, engine, oclass, pclass |
+				     NV_SUBDEV_CLASS, size, pobject);
+	subdev = *pobject;
+	if (ret)
+		return ret;
+
+	mutex_init(&subdev->mutex);
+	subdev->name = subname;
+
+	if (parent) {
+		struct nouveau_device *device = nv_device(parent);
+		int subidx = nv_hclass(subdev) & 0xff;
+
+		subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
+		subdev->mmio  = nv_subdev(device)->mmio;
+		device->subdev[subidx] = *pobject;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
new file mode 100644
index 000000000000..66f7dfd907ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/bsp.h>
+
+struct nv84_bsp_priv {
+	struct nouveau_bsp base;
+};
+
+struct nv84_bsp_chan {
+	struct nouveau_bsp_chan base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_bsp_sclass[] = {
+	{},
+};
+
+/*******************************************************************************
+ * BSP context
+ ******************************************************************************/
+
+static int
+nv84_bsp_context_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nv84_bsp_chan *priv;
+	int ret;
+
+	ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
+					 0, 0, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nv84_bsp_context_dtor(struct nouveau_object *object)
+{
+	struct nv84_bsp_chan *priv = (void *)object;
+	nouveau_bsp_context_destroy(&priv->base);
+}
+
+static int
+nv84_bsp_context_init(struct nouveau_object *object)
+{
+	struct nv84_bsp_chan *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bsp_context_init(&priv->base);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv84_bsp_chan *priv = (void *)object;
+	return nouveau_bsp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv84_bsp_cclass = {
+	.handle = NV_ENGCTX(BSP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_bsp_context_ctor,
+		.dtor = nv84_bsp_context_dtor,
+		.init = nv84_bsp_context_init,
+		.fini = nv84_bsp_context_fini,
+		.rd32 = _nouveau_bsp_context_rd32,
+		.wr32 = _nouveau_bsp_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * BSP engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv84_bsp_intr(struct nouveau_subdev *subdev)
+{
+}
+
+static int
+nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv84_bsp_priv *priv;
+	int ret;
+
+	ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x04008000;
+	nv_subdev(priv)->intr = nv84_bsp_intr;
+	nv_engine(priv)->cclass = &nv84_bsp_cclass;
+	nv_engine(priv)->sclass = nv84_bsp_sclass;
+	return 0;
+}
+
+static void
+nv84_bsp_dtor(struct nouveau_object *object)
+{
+	struct nv84_bsp_priv *priv = (void *)object;
+	nouveau_bsp_destroy(&priv->base);
+}
+
+static int
+nv84_bsp_init(struct nouveau_object *object)
+{
+	struct nv84_bsp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bsp_init(&priv->base);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv84_bsp_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv84_bsp_priv *priv = (void *)object;
+	return nouveau_bsp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv84_bsp_oclass = {
+	.handle = NV_ENGINE(BSP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_bsp_ctor,
+		.dtor = nv84_bsp_dtor,
+		.init = nv84_bsp_init,
+		.fini = nv84_bsp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
index 219850d53286..219850d53286 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
index 37d6de3c9d61..c92520f3ed46 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
@@ -1,4 +1,4 @@
-u32 nva3_pcopy_data[] = {
+static u32 nva3_pcopy_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ u32 nva3_pcopy_data[] = {
 	0x00000800,
 };
 
-u32 nva3_pcopy_code[] = {
+static u32 nva3_pcopy_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
index cd879f31bb38..0d98c6c0958d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
@@ -1,4 +1,4 @@
-u32 nvc0_pcopy_data[] = {
+static u32 nvc0_pcopy_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ u32 nvc0_pcopy_data[] = {
 	0x00000800,
 };
 
-u32 nvc0_pcopy_code[] = {
+static u32 nvc0_pcopy_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
new file mode 100644
index 000000000000..4df6da0af740
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include <engine/fifo.h>
+#include <engine/copy.h>
+
+#include "fuc/nva3.fuc.h"
+
+struct nva3_copy_priv {
+	struct nouveau_copy base;
+};
+
+struct nva3_copy_chan {
+	struct nouveau_copy_chan base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva3_copy_sclass[] = {
+	{ 0x85b5, &nouveau_object_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static int
+nva3_copy_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nva3_copy_chan *priv;
+	int ret;
+
+	ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
+					  NVOBJ_FLAG_ZERO_ALLOC, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nva3_copy_cclass = {
+	.handle = NV_ENGCTX(COPY0, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_copy_context_ctor,
+		.dtor = _nouveau_copy_context_dtor,
+		.init = _nouveau_copy_context_init,
+		.fini = _nouveau_copy_context_fini,
+		.rd32 = _nouveau_copy_context_rd32,
+		.wr32 = _nouveau_copy_context_wr32,
+
+	},
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nva3_copy_isr_error_name[] = {
+	{ 0x0001, "ILLEGAL_MTHD" },
+	{ 0x0002, "INVALID_ENUM" },
+	{ 0x0003, "INVALID_BITFIELD" },
+	{}
+};
+
+static void
+nva3_copy_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nva3_copy_priv *priv = (void *)subdev;
+	u32 dispatch = nv_rd32(priv, 0x10401c);
+	u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
+	u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
+	u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
+	u32 addr = nv_rd32(priv, 0x104040) >> 16;
+	u32 mthd = (addr & 0x07ff) << 2;
+	u32 subc = (addr & 0x3800) >> 11;
+	u32 data = nv_rd32(priv, 0x104044);
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
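+	/* bit 6 flags a dispatch error; 0x104040 (read above) carries the
+	 * error code in its low 16 bits and the faulting method address in
+	 * its high 16 bits, already split into subc/mthd for the message.
+	 */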
+	if (stat & 0x00000040) {
+		nv_error(priv, "DISPATCH_ERROR [");
+		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
+		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
+		       chid, inst << 12, subc, mthd, data);
+		nv_wr32(priv, 0x104004, 0x00000040);
+		stat &= ~0x00000040;
+	}
+
+	if (stat) {
+		nv_error(priv, "unhandled intr 0x%08x\n", stat);
+		nv_wr32(priv, 0x104004, stat);
+	}
+
+	nv50_fb_trap(nouveau_fb(priv), 1);
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nva3_copy_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x0d);
+	return 0;
+}
+
+static int
+nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	bool enable = (nv_device(parent)->chipset != 0xaf);
+	struct nva3_copy_priv *priv;
+	int ret;
+
+	ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00802000;
+	nv_subdev(priv)->intr = nva3_copy_intr;
+	nv_engine(priv)->cclass = &nva3_copy_cclass;
+	nv_engine(priv)->sclass = nva3_copy_sclass;
+	nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+	return 0;
+}
+
+static int
+nva3_copy_init(struct nouveau_object *object)
+{
+	struct nva3_copy_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_copy_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* disable all interrupts */
+	nv_wr32(priv, 0x104014, 0xffffffff);
+
+	/* upload ucode */
+	nv_wr32(priv, 0x1041c0, 0x01000000);
+	for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
+		nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
+
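+	/* the code segment is streamed the same way, but in 64-word
+	 * (256-byte) pages: 0x104188 is loaded with the page index (i >> 6)
+	 * at each page boundary before the words are written to 0x104184.
+	 */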
+	nv_wr32(priv, 0x104180, 0x01000000);
+	for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x104188, i >> 6);
+		nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
+	}
+
+	/* start it running */
+	nv_wr32(priv, 0x10410c, 0x00000000);
+	nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
+	nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
+	return 0;
+}
+
+static int
+nva3_copy_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nva3_copy_priv *priv = (void *)object;
+
+	nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
+	nv_wr32(priv, 0x104014, 0xffffffff);
+
+	return nouveau_copy_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nva3_copy_oclass = {
+	.handle = NV_ENGINE(COPY0, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_copy_ctor,
+		.dtor = _nouveau_copy_dtor,
+		.init = nva3_copy_init,
+		.fini = nva3_copy_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
new file mode 100644
index 000000000000..06d4a8791055
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/fifo.h>
+#include <engine/copy.h>
+
+#include "fuc/nvc0.fuc.h"
+
+struct nvc0_copy_priv {
+	struct nouveau_copy base;
+};
+
+struct nvc0_copy_chan {
+	struct nouveau_copy_chan base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_copy0_sclass[] = {
+	{ 0x90b5, &nouveau_object_ofuncs },
+	{},
+};
+
+static struct nouveau_oclass
+nvc0_copy1_sclass[] = {
+	{ 0x90b8, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static int
+nvc0_copy_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nvc0_copy_chan *priv;
+	int ret;
+
+	ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
+					  256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nvc0_copy_context_ofuncs = {
+	.ctor = nvc0_copy_context_ctor,
+	.dtor = _nouveau_copy_context_dtor,
+	.init = _nouveau_copy_context_init,
+	.fini = _nouveau_copy_context_fini,
+	.rd32 = _nouveau_copy_context_rd32,
+	.wr32 = _nouveau_copy_context_wr32,
+};
+
+static struct nouveau_oclass
+nvc0_copy0_cclass = {
+	.handle = NV_ENGCTX(COPY0, 0xc0),
+	.ofuncs = &nvc0_copy_context_ofuncs,
+};
+
+static struct nouveau_oclass
+nvc0_copy1_cclass = {
+	.handle = NV_ENGCTX(COPY1, 0xc0),
+	.ofuncs = &nvc0_copy_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
+	{ 0x0001, "ILLEGAL_MTHD" },
+	{ 0x0002, "INVALID_ENUM" },
+	{ 0x0003, "INVALID_BITFIELD" },
+	{}
+};
+
+static void
+nvc0_copy_intr(struct nouveau_subdev *subdev)
+{
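+	/* COPY0 and COPY1 share this handler; each engine's register block
+	 * is offset by idx * 0x1000 from the 0x104000 range.
+	 */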
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
+	struct nvc0_copy_priv *priv = (void *)subdev;
+	u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
+	u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
+	u32 stat = intr & disp & ~(disp >> 16);
+	u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
+	u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
+	u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
+	u32 mthd = (addr & 0x07ff) << 2;
+	u32 subc = (addr & 0x3800) >> 11;
+	u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000040) {
+		nv_error(priv, "DISPATCH_ERROR [");
+		nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
+		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
+		       chid, (u64)inst << 12, subc, mthd, data);
+		nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
+		stat &= ~0x00000040;
+	}
+
+	if (stat) {
+		nv_error(priv, "unhandled intr 0x%08x\n", stat);
+		nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_copy_priv *priv;
+	int ret;
+
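+	/* 0x022500 appears to flag disabled engines: with bit 8 set there is
+	 * no COPY0 on this chip, so refuse to create the engine.
+	 */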
+	if (nv_rd32(parent, 0x022500) & 0x00000100)
+		return -ENODEV;
+
+	ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000040;
+	nv_subdev(priv)->intr = nvc0_copy_intr;
+	nv_engine(priv)->cclass = &nvc0_copy0_cclass;
+	nv_engine(priv)->sclass = nvc0_copy0_sclass;
+	return 0;
+}
+
+static int
+nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_copy_priv *priv;
+	int ret;
+
+	if (nv_rd32(parent, 0x022500) & 0x00000200)
+		return -ENODEV;
+
+	ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000080;
+	nv_subdev(priv)->intr = nvc0_copy_intr;
+	nv_engine(priv)->cclass = &nvc0_copy1_cclass;
+	nv_engine(priv)->sclass = nvc0_copy1_sclass;
+	return 0;
+}
+
+static int
+nvc0_copy_init(struct nouveau_object *object)
+{
+	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+	struct nvc0_copy_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_copy_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* disable all interrupts */
+	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+	/* upload ucode */
+	nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
+	for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
+		nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
+
+	nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
+	for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
+		nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
+	}
+
+	/* start it running */
+	nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
+	nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
+	nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
+	nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
+	return 0;
+}
+
+static int
+nvc0_copy_fini(struct nouveau_object *object, bool suspend)
+{
+	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+	struct nvc0_copy_priv *priv = (void *)object;
+
+	nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
+	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+	return nouveau_copy_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nvc0_copy0_oclass = {
+	.handle = NV_ENGINE(COPY0, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_copy0_ctor,
+		.dtor = _nouveau_copy_dtor,
+		.init = nvc0_copy_init,
+		.fini = nvc0_copy_fini,
+	},
+};
+
+struct nouveau_oclass
+nvc0_copy1_oclass = {
+	.handle = NV_ENGINE(COPY1, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_copy1_ctor,
+		.dtor = _nouveau_copy_dtor,
+		.init = nvc0_copy_init,
+		.fini = nvc0_copy_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
new file mode 100644
index 000000000000..2017c1579ac5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/copy.h>
+
+struct nve0_copy_priv {
+	struct nouveau_copy base;
+};
+
+struct nve0_copy_chan {
+	struct nouveau_copy_chan base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_copy_sclass[] = {
+	{ 0xa0b5, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static int
+nve0_copy_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nve0_copy_chan *priv;
+	int ret;
+
+	ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
+					  256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nve0_copy_context_ofuncs = {
+	.ctor = nve0_copy_context_ctor,
+	.dtor = _nouveau_copy_context_dtor,
+	.init = _nouveau_copy_context_init,
+	.fini = _nouveau_copy_context_fini,
+	.rd32 = _nouveau_copy_context_rd32,
+	.wr32 = _nouveau_copy_context_wr32,
+};
+
+static struct nouveau_oclass
+nve0_copy_cclass = {
+	.handle = NV_ENGCTX(COPY0, 0xc0),
+	.ofuncs = &nve0_copy_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nve0_copy_priv *priv;
+	int ret;
+
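+	/* same engine-disable check as on nvc0: bit 8 of 0x022500 set means
+	 * COPY0 is unavailable on this chip.
+	 */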
+	if (nv_rd32(parent, 0x022500) & 0x00000100)
+		return -ENODEV;
+
+	ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000040;
+	nv_engine(priv)->cclass = &nve0_copy_cclass;
+	nv_engine(priv)->sclass = nve0_copy_sclass;
+	return 0;
+}
+
+static int
+nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nve0_copy_priv *priv;
+	int ret;
+
+	if (nv_rd32(parent, 0x022500) & 0x00000200)
+		return -ENODEV;
+
+	ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000080;
+	nv_engine(priv)->cclass = &nve0_copy_cclass;
+	nv_engine(priv)->sclass = nve0_copy_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_copy0_oclass = {
+	.handle = NV_ENGINE(COPY0, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_copy0_ctor,
+		.dtor = _nouveau_copy_dtor,
+		.init = _nouveau_copy_init,
+		.fini = _nouveau_copy_fini,
+	},
+};
+
+struct nouveau_oclass
+nve0_copy1_oclass = {
+	.handle = NV_ENGINE(COPY1, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_copy1_ctor,
+		.dtor = _nouveau_copy_dtor,
+		.init = _nouveau_copy_init,
+		.fini = _nouveau_copy_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
index 7393813044de..629da02dc352 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
@@ -238,7 +238,7 @@ ih:
 			cmpu b32 $r4 0x60+#dma_count
 			bra nc #illegal_mthd
 			shl b32 $r5 $r4 2
-			add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
+			add b32 $r5 ((#ctx_dma - 0x60 * 4) & 0xffff)
 			bset $r3 0x1e
 			st b32 D[$r5] $r3
 			add b32 $r4 0x180 - 0x60
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
index 38676c74e6e0..09962e4210e9 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
@@ -1,4 +1,4 @@
-uint32_t nv98_pcrypt_data[] = {
+static uint32_t nv98_pcrypt_data[] = {
 /* 0x0000: ctx_dma */
 /* 0x0000: ctx_dma_query */
 	0x00000000,
@@ -150,7 +150,7 @@ uint32_t nv98_pcrypt_data[] = {
 	0x00000000,
 };
 
-uint32_t nv98_pcrypt_code[] = {
+static uint32_t nv98_pcrypt_code[] = {
 	0x17f004bd,
 	0x0010fe35,
 	0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
new file mode 100644
index 000000000000..1d85e5b66ca0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/crypt.h>
+
+struct nv84_crypt_priv {
+	struct nouveau_crypt base;
+};
+
+struct nv84_crypt_chan {
+	struct nouveau_crypt_chan base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static int
+nv84_crypt_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv84_crypt_ofuncs = {
+	.ctor = nv84_crypt_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv84_crypt_sclass[] = {
+	{ 0x74c1, &nv84_crypt_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
+
+static int
+nv84_crypt_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv84_crypt_chan *priv;
+	int ret;
+
+	ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+					   0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv84_crypt_cclass = {
+	.handle = NV_ENGCTX(CRYPT, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_crypt_context_ctor,
+		.dtor = _nouveau_crypt_context_dtor,
+		.init = _nouveau_crypt_context_init,
+		.fini = _nouveau_crypt_context_fini,
+		.rd32 = _nouveau_crypt_context_rd32,
+		.wr32 = _nouveau_crypt_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_bitfield nv84_crypt_intr_mask[] = {
+	{ 0x00000001, "INVALID_STATE" },
+	{ 0x00000002, "ILLEGAL_MTHD" },
+	{ 0x00000004, "ILLEGAL_CLASS" },
+	{ 0x00000080, "QUERY" },
+	{ 0x00000100, "FAULT" },
+	{}
+};
+
+static void
+nv84_crypt_intr(struct nouveau_subdev *subdev)
+{
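+	/* 0x102130 is the raw interrupt status; 0x102188 holds the active
+	 * channel instance, used to look up the owning engine context.
+	 */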
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nv84_crypt_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, 0x102130);
+	u32 mthd = nv_rd32(priv, 0x102190);
+	u32 data = nv_rd32(priv, 0x102194);
+	u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat) {
+		nv_error(priv, "");
+		nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
+		printk(" ch %d [0x%010llx] mthd 0x%04x data 0x%08x\n",
+		       chid, (u64)inst << 12, mthd, data);
+	}
+
+	nv_wr32(priv, 0x102130, stat);
+	nv_wr32(priv, 0x10200c, 0x10);
+
+	nv50_fb_trap(nouveau_fb(priv), 1);
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv84_crypt_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x0a);
+	return 0;
+}
+
+static int
+nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv84_crypt_priv *priv;
+	int ret;
+
+	ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00004000;
+	nv_subdev(priv)->intr = nv84_crypt_intr;
+	nv_engine(priv)->cclass = &nv84_crypt_cclass;
+	nv_engine(priv)->sclass = nv84_crypt_sclass;
+	nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush;
+	return 0;
+}
+
+static int
+nv84_crypt_init(struct nouveau_object *object)
+{
+	struct nv84_crypt_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_crypt_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x102130, 0xffffffff);
+	nv_wr32(priv, 0x102140, 0xffffffbf);
+	nv_wr32(priv, 0x10200c, 0x00000010);
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_crypt_oclass = {
+	.handle = NV_ENGINE(CRYPT, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_crypt_ctor,
+		.dtor = _nouveau_crypt_dtor,
+		.init = nv84_crypt_init,
+		.fini = _nouveau_crypt_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
new file mode 100644
index 000000000000..9e3876c89b96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/crypt.h>
+
+#include "fuc/nv98.fuc.h"
+
+struct nv98_crypt_priv {
+	struct nouveau_crypt base;
+};
+
+struct nv98_crypt_chan {
+	struct nouveau_crypt_chan base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_crypt_sclass[] = {
+	{ 0x88b4, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
+
+static int
+nv98_crypt_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv98_crypt_chan *priv;
+	int ret;
+
+	ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+					   256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv98_crypt_cclass = {
+	.handle = NV_ENGCTX(CRYPT, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_crypt_context_ctor,
+		.dtor = _nouveau_crypt_context_dtor,
+		.init = _nouveau_crypt_context_init,
+		.fini = _nouveau_crypt_context_fini,
+		.rd32 = _nouveau_crypt_context_rd32,
+		.wr32 = _nouveau_crypt_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nv98_crypt_isr_error_name[] = {
+	{ 0x0000, "ILLEGAL_MTHD" },
+	{ 0x0001, "INVALID_BITFIELD" },
+	{ 0x0002, "INVALID_ENUM" },
+	{ 0x0003, "QUERY" },
+	{}
+};
+
+static void
+nv98_crypt_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nv98_crypt_priv *priv = (void *)subdev;
+	u32 disp = nv_rd32(priv, 0x08701c);
+	u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
+	u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
+	u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
+	u32 addr = nv_rd32(priv, 0x087040) >> 16;
+	u32 mthd = (addr & 0x07ff) << 2;
+	u32 subc = (addr & 0x3800) >> 11;
+	u32 data = nv_rd32(priv, 0x087044);
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000040) {
+		nv_error(priv, "DISPATCH_ERROR [");
+		nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
+		       chid, (u64)inst << 12, subc, mthd, data);
+		nv_wr32(priv, 0x087004, 0x00000040);
+		stat &= ~0x00000040;
+	}
+
+	if (stat) {
+		nv_error(priv, "unhandled intr 0x%08x\n", stat);
+		nv_wr32(priv, 0x087004, stat);
+	}
+
+	nv50_fb_trap(nouveau_fb(priv), 1);
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv98_crypt_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x0a);
+	return 0;
+}
+
+static int
+nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv98_crypt_priv *priv;
+	int ret;
+
+	ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00004000;
+	nv_subdev(priv)->intr = nv98_crypt_intr;
+	nv_engine(priv)->cclass = &nv98_crypt_cclass;
+	nv_engine(priv)->sclass = nv98_crypt_sclass;
+	nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
+	return 0;
+}
+
+static int
+nv98_crypt_init(struct nouveau_object *object)
+{
+	struct nv98_crypt_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_crypt_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* wait for exit interrupt to signal */
+	nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x087004, 0x00000010);
+
+	/* upload microcode code and data segments */
+	nv_wr32(priv, 0x087ff8, 0x00100000);
+	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+		nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
+
+	nv_wr32(priv, 0x087ff8, 0x00000000);
+	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+		nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
+
+	/* start it running */
+	nv_wr32(priv, 0x08710c, 0x00000000);
+	nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
+	nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+	return 0;
+}
+
+struct nouveau_oclass
+nv98_crypt_oclass = {
+	.handle = NV_ENGINE(CRYPT, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_crypt_ctor,
+		.dtor = _nouveau_crypt_dtor,
+		.init = nv98_crypt_init,
+		.fini = _nouveau_crypt_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
new file mode 100644
index 000000000000..1c919f2af89f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/disp.h>
+
+struct nv04_disp_priv {
+	struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nv04_disp_sclass[] = {
+	{},
+};
+
+static void
+nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
+{
+	struct nouveau_disp *disp = &priv->base;
+	if (disp->vblank.notify)
+		disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nv04_disp_intr(struct nouveau_subdev *subdev)
+{
+	struct nv04_disp_priv *priv = (void *)subdev;
+	u32 crtc0 = nv_rd32(priv, 0x600100);
+	u32 crtc1 = nv_rd32(priv, 0x602100);
+
+	if (crtc0 & 0x00000001) {
+		nv04_disp_intr_vblank(priv, 0);
+		nv_wr32(priv, 0x600100, 0x00000001);
+	}
+
+	if (crtc1 & 0x00000001) {
+		nv04_disp_intr_vblank(priv, 1);
+		nv_wr32(priv, 0x602100, 0x00000001);
+	}
+}
+
+static int
+nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv04_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nv04_disp_sclass;
+	nv_subdev(priv)->intr = nv04_disp_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
new file mode 100644
index 000000000000..16a9afb1060b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nv50_disp_priv {
+	struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nv50_disp_sclass[] = {
+	{},
+};
+
+static void
+nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
+{
+	struct nouveau_disp *disp = &priv->base;
+	struct nouveau_software_chan *chan, *temp;
+	unsigned long flags;
+
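+	/* for each software channel registered against this crtc, select its
+	 * ctxdma and write back the offset/value it supplied: chipset 0x50
+	 * uses 0x001570/0x001574, later chips 0x060010/0x060014 (with the
+	 * upper offset word at 0x06000c on nvc0 and up).
+	 */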
+	spin_lock_irqsave(&disp->vblank.lock, flags);
+	list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
+		if (chan->vblank.crtc != crtc)
+			continue;
+
+		nv_wr32(priv, 0x001704, chan->vblank.channel);
+		nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+
+		if (nv_device(priv)->chipset == 0x50) {
+			nv_wr32(priv, 0x001570, chan->vblank.offset);
+			nv_wr32(priv, 0x001574, chan->vblank.value);
+		} else {
+			if (nv_device(priv)->chipset >= 0xc0) {
+				nv_wr32(priv, 0x06000c,
+					upper_32_bits(chan->vblank.offset));
+			}
+			nv_wr32(priv, 0x060010, chan->vblank.offset);
+			nv_wr32(priv, 0x060014, chan->vblank.value);
+		}
+
+		list_del(&chan->vblank.head);
+		if (disp->vblank.put)
+			disp->vblank.put(disp->vblank.data, crtc);
+	}
+	spin_unlock_irqrestore(&disp->vblank.lock, flags);
+
+	if (disp->vblank.notify)
+		disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nv50_disp_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_disp_priv *priv = (void *)subdev;
+	u32 stat1 = nv_rd32(priv, 0x610024);
+
+	if (stat1 & 0x00000004) {
+		nv50_disp_intr_vblank(priv, 0);
+		nv_wr32(priv, 0x610024, 0x00000004);
+		stat1 &= ~0x00000004;
+	}
+
+	if (stat1 & 0x00000008) {
+		nv50_disp_intr_vblank(priv, 1);
+		nv_wr32(priv, 0x610024, 0x00000008);
+		stat1 &= ~0x00000008;
+	}
+}
+
+static int
+nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nv50_disp_sclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+
+	INIT_LIST_HEAD(&priv->base.vblank.list);
+	spin_lock_init(&priv->base.vblank.lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
new file mode 100644
index 000000000000..d93efbcf75b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bar.h>
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nvd0_disp_priv {
+	struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nvd0_disp_sclass[] = {
+	{},
+};
+
+static void
+nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nouveau_disp *disp = &priv->base;
+	struct nouveau_software_chan *chan, *temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&disp->vblank.lock, flags);
+	list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
+		if (chan->vblank.crtc != crtc)
+			continue;
+
+		nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+		bar->flush(bar);
+		nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
+		nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
+		nv_wr32(priv, 0x060014, chan->vblank.value);
+
+		list_del(&chan->vblank.head);
+		if (disp->vblank.put)
+			disp->vblank.put(disp->vblank.data, crtc);
+	}
+	spin_unlock_irqrestore(&disp->vblank.lock, flags);
+
+	if (disp->vblank.notify)
+		disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nvd0_disp_intr(struct nouveau_subdev *subdev)
+{
+	struct nvd0_disp_priv *priv = (void *)subdev;
+	u32 intr = nv_rd32(priv, 0x610088);
+	int i;
+
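+	/* one interrupt bit per head (0x01000000 << i) in 0x610088; the
+	 * per-head status sits at 0x6100bc + (i * 0x800), bit 0 = vblank.
+	 */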
+	for (i = 0; i < 4; i++) {
+		u32 mask = 0x01000000 << i;
+		if (mask & intr) {
+			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
+			if (stat & 0x00000001)
+				nvd0_disp_intr_vblank(priv, i);
+			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
+			nv_rd32(priv, 0x6100c0 + (i * 0x800));
+		}
+	}
+}
+
+static int
+nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nvd0_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nvd0_disp_sclass;
+	nv_subdev(priv)->intr = nvd0_disp_intr;
+
+	INIT_LIST_HEAD(&priv->base.vblank.list);
+	spin_lock_init(&priv->base.vblank.lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
new file mode 100644
index 000000000000..5a1c68474597
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/subdev.h>
+#include <core/device.h>
+#include <subdev/vga.h>
+
+u8
+nv_rdport(void *obj, int head, u16 port)
+{
+	struct nouveau_device *device = nv_device(obj);
+
+	if (device->card_type >= NV_50)
+		return nv_rd08(obj, 0x601000 + port);
+
+	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
+	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
+	    port == 0x03d4 || port == 0x03d5)	/* CR */
+		return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
+
+	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
+	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
+	    port == 0x03ce || port == 0x03cf) {	/* GR */
+		if (device->card_type < NV_40)
+			head = 0; /* CR44 selects head */
+		return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
+	}
+
+	nv_error(obj, "unknown vga port 0x%04x\n", port);
+	return 0x00;
+}
+
+void
+nv_wrport(void *obj, int head, u16 port, u8 data)
+{
+	struct nouveau_device *device = nv_device(obj);
+
+	if (device->card_type >= NV_50)
+		nv_wr08(obj, 0x601000 + port, data);
+	else
+	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
+	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
+	    port == 0x03d4 || port == 0x03d5)	/* CR */
+		nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
+	else
+	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
+	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
+	    port == 0x03ce || port == 0x03cf) {	/* GR */
+		if (device->card_type < NV_40)
+			head = 0; /* CR44 selects head */
+		nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
+	} else
+		nv_error(obj, "unknown vga port 0x%04x\n", port);
+}
+
+u8
+nv_rdvgas(void *obj, int head, u8 index)
+{
+	nv_wrport(obj, head, 0x03c4, index);
+	return nv_rdport(obj, head, 0x03c5);
+}
+
+void
+nv_wrvgas(void *obj, int head, u8 index, u8 value)
+{
+	nv_wrport(obj, head, 0x03c4, index);
+	nv_wrport(obj, head, 0x03c5, value);
+}
+
+u8
+nv_rdvgag(void *obj, int head, u8 index)
+{
+	nv_wrport(obj, head, 0x03ce, index);
+	return nv_rdport(obj, head, 0x03cf);
+}
+
+void
+nv_wrvgag(void *obj, int head, u8 index, u8 value)
+{
+	nv_wrport(obj, head, 0x03ce, index);
+	nv_wrport(obj, head, 0x03cf, value);
+}
+
+u8
+nv_rdvgac(void *obj, int head, u8 index)
+{
+	nv_wrport(obj, head, 0x03d4, index);
+	return nv_rdport(obj, head, 0x03d5);
+}
+
+void
+nv_wrvgac(void *obj, int head, u8 index, u8 value)
+{
+	nv_wrport(obj, head, 0x03d4, index);
+	nv_wrport(obj, head, 0x03d5, value);
+}
+
+u8
+nv_rdvgai(void *obj, int head, u16 port, u8 index)
+{
+	if (port == 0x03c4) return nv_rdvgas(obj, head, index);
+	if (port == 0x03ce) return nv_rdvgag(obj, head, index);
+	if (port == 0x03d4) return nv_rdvgac(obj, head, index);
+	nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+	return 0x00;
+}
+
+void
+nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
+{
+	if      (port == 0x03c4) nv_wrvgas(obj, head, index, value);
+	else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
+	else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
+	else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+}
+
+bool
+nv_lockvgac(void *obj, bool lock)
+{
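+	/* CR1F is the extended CRTC lock: writing 0x99 locks it, 0x57
+	 * unlocks it, and the code treats a zero readback as "locked".
+	 */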
+	bool locked = !nv_rdvgac(obj, 0, 0x1f);
+	u8 data = lock ? 0x99 : 0x57;
+	nv_wrvgac(obj, 0, 0x1f, data);
+	if (nv_device(obj)->chipset == 0x11) {
+		if (!(nv_rd32(obj, 0x001084) & 0x10000000))
+			nv_wrvgac(obj, 1, 0x1f, data);
+	}
+	return locked;
+}
+
+/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
+ * it affects only the 8 bit vga io regs, which we access using mmio at
+ * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
+ * in general, the set value of cr44 does not matter: reg access works as
+ * expected and values can be set for the appropriate head by using a 0x2000
+ * offset as required
+ * however:
+ * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
+ *    cr44 must be set to 0 or 3 for accessing values on the correct head
+ *    through the common 0xc03c* addresses
+ * b) in tied mode (4) head B is programmed to the values set on head A, and
+ *    access using the head B addresses can have strange results, ergo we leave
+ *    tied mode in init once we know to what cr44 should be restored on exit
+ *
+ * the owner parameter is slightly abused:
+ * 0 and 1 are treated as head values and so the set value is (owner * 3)
+ * other values are treated as literal values to set
+ */
+u8
+nv_rdvgaowner(void *obj)
+{
+	if (nv_device(obj)->card_type < NV_50) {
+		if (nv_device(obj)->chipset == 0x11) {
+			u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
+			if (tied == 0) {
+				u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
+				u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
+				u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
+				u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
+				if (slA && !tvA) return 0x00;
+				if (slB && !tvB) return 0x03;
+				if (slA) return 0x00;
+				if (slB) return 0x03;
+				return 0x00;
+			}
+			return 0x04;
+		}
+
+		return nv_rdvgac(obj, 0, 0x44);
+	}
+
+	nv_error(obj, "rdvgaowner after nv4x\n");
+	return 0x00;
+}
+
+void
+nv_wrvgaowner(void *obj, u8 select)
+{
+	if (nv_device(obj)->card_type < NV_50) {
+		u8 owner = (select == 1) ? 3 : select;
+		if (nv_device(obj)->chipset == 0x11) {
+			/* workaround hw lockup bug */
+			nv_rdvgac(obj, 0, 0x1f);
+			nv_rdvgac(obj, 1, 0x1f);
+		}
+
+		nv_wrvgac(obj, 0, 0x44, owner);
+
+		if (nv_device(obj)->chipset == 0x11) {
+			nv_wrvgac(obj, 0, 0x2e, owner);
+			nv_wrvgac(obj, 0, 0x2e, owner);
+		}
+	} else
+		nv_error(obj, "wrvgaowner after nv4x\n");
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
new file mode 100644
index 000000000000..e1f013d39768
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+int
+nouveau_dmaobj_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass,
+		       void *data, u32 size, int len, void **pobject)
+{
+	struct nv_dma_class *args = data;
+	struct nouveau_dmaobj *object;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
+	object = *pobject;
+	if (ret)
+		return ret;
+
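+	/* translate the NV_DMA_TARGET and NV_DMA_ACCESS bits from the class
+	 * arguments into the driver's internal NV_MEM_* enums; anything
+	 * unrecognised is rejected with -EINVAL.
+	 */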
+	switch (args->flags & NV_DMA_TARGET_MASK) {
+	case NV_DMA_TARGET_VM:
+		object->target = NV_MEM_TARGET_VM;
+		break;
+	case NV_DMA_TARGET_VRAM:
+		object->target = NV_MEM_TARGET_VRAM;
+		break;
+	case NV_DMA_TARGET_PCI:
+		object->target = NV_MEM_TARGET_PCI;
+		break;
+	case NV_DMA_TARGET_PCI_US:
+	case NV_DMA_TARGET_AGP:
+		object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (args->flags & NV_DMA_ACCESS_MASK) {
+	case NV_DMA_ACCESS_VM:
+		object->access = NV_MEM_ACCESS_VM;
+		break;
+	case NV_DMA_ACCESS_RD:
+		object->access = NV_MEM_ACCESS_RO;
+		break;
+	case NV_DMA_ACCESS_WR:
+		object->access = NV_MEM_ACCESS_WO;
+		break;
+	case NV_DMA_ACCESS_RDWR:
+		object->access = NV_MEM_ACCESS_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	object->start = args->start;
+	object->limit = args->limit;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
new file mode 100644
index 000000000000..9f4cc2f31994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm/nv04.h>
+
+#include <engine/dmaobj.h>
+
+struct nv04_dmaeng_priv {
+	struct nouveau_dmaeng base;
+};
+
+struct nv04_dmaobj_priv {
+	struct nouveau_dmaobj base;
+};
+
+static int
+nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
+{
+	struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng);
+	struct nouveau_gpuobj *gpuobj;
+	u32 flags0 = nv_mclass(dmaobj);
+	u32 flags2 = 0x00000000;
+	u64 offset = dmaobj->start & 0xfffff000;
+	u64 adjust = dmaobj->start & 0x00000fff;
+	u32 length = dmaobj->limit - dmaobj->start;
+	int ret;
+
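+	/* VM-target objects are rewritten here as linear PCI mappings; on
+	 * the original nv04 mmu the start address is first resolved through
+	 * the single page table (start == 0 just aliases the page table).
+	 */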
+	if (dmaobj->target == NV_MEM_TARGET_VM) {
+		if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
+			struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
+			if (!dmaobj->start)
+				return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
+			offset  = nv_ro32(pgt, 8 + (offset >> 10));
+			offset &= 0xfffff000;
+		}
+
+		dmaobj->target = NV_MEM_TARGET_PCI;
+		dmaobj->access = NV_MEM_ACCESS_RW;
+	}
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00003000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00023000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00033000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->access) {
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		flags0 |= 0x00008000;
+	case NV_MEM_ACCESS_RW:
+		flags2 |= 0x00000002;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
+	*pgpuobj = gpuobj;
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
+		nv_wo32(*pgpuobj, 0x04, length);
+		nv_wo32(*pgpuobj, 0x08, flags2 | offset);
+		nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
+	}
+
+	return ret;
+}
+
+static int
+nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nouveau_dmaeng *dmaeng = (void *)engine;
+	struct nv04_dmaobj_priv *dmaobj;
+	struct nouveau_gpuobj *gpuobj;
+	int ret;
+
+	ret = nouveau_dmaobj_create(parent, engine, oclass,
+				    data, size, &dmaobj);
+	*pobject = nv_object(dmaobj);
+	if (ret)
+		return ret;
+
+	switch (nv_mclass(parent)) {
+	case NV_DEVICE_CLASS:
+		break;
+	case NV03_CHANNEL_DMA_CLASS:
+	case NV10_CHANNEL_DMA_CLASS:
+	case NV17_CHANNEL_DMA_CLASS:
+	case NV40_CHANNEL_DMA_CLASS:
+		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
+		nouveau_object_ref(NULL, pobject);
+		*pobject = nv_object(gpuobj);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct nouveau_ofuncs
+nv04_dmaobj_ofuncs = {
+	.ctor = nv04_dmaobj_ctor,
+	.dtor = _nouveau_dmaobj_dtor,
+	.init = _nouveau_dmaobj_init,
+	.fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nv04_dmaobj_sclass[] = {
+	{ 0x0002, &nv04_dmaobj_ofuncs },
+	{ 0x0003, &nv04_dmaobj_ofuncs },
+	{ 0x003d, &nv04_dmaobj_ofuncs },
+	{}
+};
+
+static int
+nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nv04_dmaeng_priv *priv;
+	int ret;
+
+	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.sclass = nv04_dmaobj_sclass;
+	priv->base.bind = nv04_dmaobj_bind;
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_dmaeng_oclass = {
+	.handle = NV_ENGINE(DMAOBJ, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_dmaeng_ctor,
+		.dtor = _nouveau_dmaeng_dtor,
+		.init = _nouveau_dmaeng_init,
+		.fini = _nouveau_dmaeng_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
new file mode 100644
index 000000000000..045d2565e289
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nv50_dmaeng_priv {
+	struct nouveau_dmaeng base;
+};
+
+struct nv50_dmaobj_priv {
+	struct nouveau_dmaobj base;
+};
+
+static int
+nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
+{
+	u32 flags = nv_mclass(dmaobj);
+	int ret;
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VM:
+		flags |= 0x00000000;
+		flags |= 0x60000000; /* COMPRESSION_USEVM */
+		flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+		break;
+	case NV_MEM_TARGET_VRAM:
+		flags |= 0x00010000;
+		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags |= 0x00020000;
+		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags |= 0x00030000;
+		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		flags |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		flags |= 0x00080000;
+		break;
+	}
+
+	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags);
+		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+					upper_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x10, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, 0x00000000);
+	}
+
+	return ret;
+}
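+
+/* Worked example for nv50_dmaobj_bind() above, for a hypothetical 0x003d
+ * class object: a VRAM target with RW access gives
+ *   flags = 0x003d | 0x00010000 | 0x00100000 | 0x00080000 = 0x0019003d
+ * and the 24-byte object holds flags, the low 32 bits of limit and start,
+ * the high bits packed as (limit_hi << 24 | start_hi), then two zero words.
+ */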
+
+static int
+nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nouveau_dmaeng *dmaeng = (void *)engine;
+	struct nv50_dmaobj_priv *dmaobj;
+	struct nouveau_gpuobj *gpuobj;
+	int ret;
+
+	ret = nouveau_dmaobj_create(parent, engine, oclass,
+				    data, size, &dmaobj);
+	*pobject = nv_object(dmaobj);
+	if (ret)
+		return ret;
+
+	switch (nv_mclass(parent)) {
+	case NV_DEVICE_CLASS:
+		break;
+	case NV50_CHANNEL_DMA_CLASS:
+	case NV84_CHANNEL_DMA_CLASS:
+	case NV50_CHANNEL_IND_CLASS:
+	case NV84_CHANNEL_IND_CLASS:
+		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
+		nouveau_object_ref(NULL, pobject);
+		*pobject = nv_object(gpuobj);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct nouveau_ofuncs
+nv50_dmaobj_ofuncs = {
+	.ctor = nv50_dmaobj_ctor,
+	.dtor = _nouveau_dmaobj_dtor,
+	.init = _nouveau_dmaobj_init,
+	.fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nv50_dmaobj_sclass[] = {
+	{ 0x0002, &nv50_dmaobj_ofuncs },
+	{ 0x0003, &nv50_dmaobj_ofuncs },
+	{ 0x003d, &nv50_dmaobj_ofuncs },
+	{}
+};
+
+static int
+nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nv50_dmaeng_priv *priv;
+	int ret;
+
+	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.sclass = nv50_dmaobj_sclass;
+	priv->base.bind = nv50_dmaobj_bind;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_dmaeng_oclass = {
+	.handle = NV_ENGINE(DMAOBJ, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_dmaeng_ctor,
+		.dtor = _nouveau_dmaeng_dtor,
+		.init = _nouveau_dmaeng_init,
+		.fini = _nouveau_dmaeng_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
new file mode 100644
index 000000000000..5baa08695535
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvc0_dmaeng_priv {
+	struct nouveau_dmaeng base;
+};
+
+struct nvc0_dmaobj_priv {
+	struct nouveau_dmaobj base;
+};
+
+static int
+nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nvc0_dmaobj_priv *dmaobj;
+	int ret;
+
+	ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
+	*pobject = nv_object(dmaobj);
+	if (ret)
+		return ret;
+
+	if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nvc0_dmaobj_ofuncs = {
+	.ctor = nvc0_dmaobj_ctor,
+	.dtor = _nouveau_dmaobj_dtor,
+	.init = _nouveau_dmaobj_init,
+	.fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nvc0_dmaobj_sclass[] = {
+	{ 0x0002, &nvc0_dmaobj_ofuncs },
+	{ 0x0003, &nvc0_dmaobj_ofuncs },
+	{ 0x003d, &nvc0_dmaobj_ofuncs },
+	{}
+};
+
+static int
+nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nvc0_dmaeng_priv *priv;
+	int ret;
+
+	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.sclass = nvc0_dmaobj_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_dmaeng_oclass = {
+	.handle = NV_ENGINE(DMAOBJ, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_dmaeng_ctor,
+		.dtor = _nouveau_dmaeng_dtor,
+		.init = _nouveau_dmaeng_init,
+		.fini = _nouveau_dmaeng_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
new file mode 100644
index 000000000000..bbb43c67c2ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/handle.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+int
+nouveau_fifo_channel_create_(struct nouveau_object *parent,
+			     struct nouveau_object *engine,
+			     struct nouveau_oclass *oclass,
+			     int bar, u32 addr, u32 size, u32 pushbuf,
+			     u32 engmask, int len, void **ptr)
+{
+	struct nouveau_device *device = nv_device(engine);
+	struct nouveau_fifo *priv = (void *)engine;
+	struct nouveau_fifo_chan *chan;
+	struct nouveau_dmaeng *dmaeng;
+	unsigned long flags;
+	int ret;
+
+	/* create base object class */
+	ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+				     engmask, len, ptr);
+	chan = *ptr;
+	if (ret)
+		return ret;
+
+	/* validate dma object representing push buffer */
+	chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+	if (!chan->pushdma)
+		return -ENOENT;
+
+	dmaeng = (void *)chan->pushdma->base.engine;
+	switch (chan->pushdma->base.oclass->handle) {
+	case 0x0002:
+	case 0x003d:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (dmaeng->bind) {
+		ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+		if (ret)
+			return ret;
+	}
+
+	/* find a free fifo channel */
+	spin_lock_irqsave(&priv->lock, flags);
+	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
+		if (!priv->channel[chan->chid]) {
+			priv->channel[chan->chid] = nv_object(chan);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (chan->chid == priv->max) {
+		nv_error(priv, "no free channels\n");
+		return -ENOSPC;
+	}
+
+	/* map fifo control registers */
+	chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
+			     (chan->chid * size), size);
+	if (!chan->user)
+		return -EFAULT;
+
+	chan->size = size;
+	return 0;
+}
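+
+/* The per-channel control window mapped above lives at
+ * BAR<bar> + addr + (chid * size).  With the values the nv04 channel ctor
+ * passes (bar 0, addr 0x800000, size 0x10000), channel 2, for example, is
+ * mapped at BAR0 offset 0x820000.
+ */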
+
+void
+nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
+{
+	struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
+	unsigned long flags;
+
+	iounmap(chan->user);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->channel[chan->chid] = NULL;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	nouveau_gpuobj_ref(NULL, &chan->pushgpu);
+	nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
+	nouveau_namedb_destroy(&chan->base);
+}
+
+void
+_nouveau_fifo_channel_dtor(struct nouveau_object *object)
+{
+	struct nouveau_fifo_chan *chan = (void *)object;
+	nouveau_fifo_channel_destroy(chan);
+}
+
+u32
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+{
+	struct nouveau_fifo_chan *chan = (void *)object;
+	return ioread32_native(chan->user + addr);
+}
+
+void
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	struct nouveau_fifo_chan *chan = (void *)object;
+	iowrite32_native(data, chan->user + addr);
+}
+
+static int
+nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
+{
+	int engidx = nv_hclass(priv) & 0xff;
+
+	while (object && object->parent) {
+		if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
+		    (nv_hclass(object->parent) & 0xff) == engidx)
+			return nouveau_fifo_chan(object)->chid;
+		object = object->parent;
+	}
+
+	return -1;
+}
+
+void
+nouveau_fifo_destroy(struct nouveau_fifo *priv)
+{
+	kfree(priv->channel);
+	nouveau_engine_destroy(&priv->base);
+}
+
+int
+nouveau_fifo_create_(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass,
+		     int min, int max, int length, void **pobject)
+{
+	struct nouveau_fifo *priv;
+	int ret;
+
+	ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
+				     "fifo", length, pobject);
+	priv = *pobject;
+	if (ret)
+		return ret;
+
+	priv->min = min;
+	priv->max = max;
+	priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
+	if (!priv->channel)
+		return -ENOMEM;
+
+	priv->chid = nouveau_fifo_chid;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
new file mode 100644
index 000000000000..ea76e3e8c9c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -0,0 +1,630 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv04_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+int
+nv04_fifo_object_attach(struct nouveau_object *parent,
+			struct nouveau_object *object, u32 handle)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	struct nv04_fifo_chan *chan = (void *)parent;
+	u32 context, chid = chan->base.chid;
+	int ret;
+
+	if (nv_iclass(object, NV_GPUOBJ_CLASS))
+		context = nv_gpuobj(object)->addr >> 4;
+	else
+		context = 0x00000004; /* just non-zero */
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW:
+		context |= 0x00000000;
+		break;
+	case NVDEV_ENGINE_GR:
+		context |= 0x00010000;
+		break;
+	case NVDEV_ENGINE_MPEG:
+		context |= 0x00020000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	context |= 0x80000000; /* valid */
+	context |= chid << 24;
+
+	mutex_lock(&nv_subdev(priv)->mutex);
+	ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+	return ret;
+}
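+
+/* Worked example for the context word above, using hypothetical values: a GR
+ * object at instance address 0x12340 attached on channel 3 inserts
+ *   (0x12340 >> 4) | 0x00010000 | 0x80000000 | (3 << 24) = 0x83011234
+ * into RAMHT under the supplied handle.
+ */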
+
+void
+nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	mutex_lock(&nv_subdev(priv)->mutex);
+	nouveau_ramht_remove(priv->ramht, cookie);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+int
+nv04_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid;
+	return 0;
+}
+
+static int
+nv04_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv04_fifo_priv *priv = (void *)engine;
+	struct nv04_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+					  0x10000, args->pushbuf,
+					  (1 << NVDEV_ENGINE_DMAOBJ) |
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+	chan->ramfc = chan->base.chid * 32;
+
+	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
+			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	return 0;
+}
+
+void
+nv04_fifo_chan_dtor(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object->engine;
+	struct nv04_fifo_chan *chan = (void *)object;
+	struct ramfc_desc *c = priv->ramfc_desc;
+
+	do {
+		nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+	} while ((++c)->bits);
+
+	nouveau_fifo_channel_destroy(&chan->base);
+}
+
+int
+nv04_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object->engine;
+	struct nv04_fifo_chan *chan = (void *)object;
+	u32 mask = 1 << chan->base.chid;
+	unsigned long flags;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return 0;
+}
+
+int
+nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_fifo_priv *priv = (void *)object->engine;
+	struct nv04_fifo_chan *chan = (void *)object;
+	struct nouveau_gpuobj *fctx = priv->ramfc;
+	struct ramfc_desc *c;
+	unsigned long flags;
+	u32 data = chan->ramfc;
+	u32 chid;
+
+	/* prevent fifo context switches */
+	spin_lock_irqsave(&priv->base.lock, flags);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+
+	/* if this channel is active, replace it with a null context */
+	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+	if (chid == chan->base.chid) {
+		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
+		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+		c = priv->ramfc_desc;
+		do {
+			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+			u32 rv = (nv_rd32(priv, c->regp) &  rm) >> c->regs;
+			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
+			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+		} while ((++c)->bits);
+
+		c = priv->ramfc_desc;
+		do {
+			nv_wr32(priv, c->regp, 0x00000000);
+		} while ((++c)->bits);
+
+		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
+		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
+		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	}
+
+	/* restore normal operation, after disabling dma mode */
+	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
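+
+/* Example of how one ramfc_desc entry drives the save loop above: the nv04
+ * entry { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT } reads bits 15:0
+ * of the DCOUNT register (regs=0, bits=16) and stores them in bits 31:16
+ * (ctxs=16) of the word at RAMFC offset 0x08, next to DMA_INSTANCE in the
+ * low half; the second loop then zeroes the register itself.
+ */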
+
+static struct nouveau_ofuncs
+nv04_fifo_ofuncs = {
+	.ctor = nv04_fifo_chan_ctor,
+	.dtor = nv04_fifo_chan_dtor,
+	.init = nv04_fifo_chan_init,
+	.fini = nv04_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv04_fifo_sclass[] = {
+	{ NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+int
+nv04_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv04_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+				          0x1000, NVOBJ_FLAG_HEAP, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv04_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_context_ctor,
+		.dtor = _nouveau_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+void
+nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__acquires(priv->base.lock)
+{
+	struct nv04_fifo_priv *priv = (void *)pfifo;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	*pflags = flags;
+
+	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
+	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+
+	/* in some cases the puller may be left in an inconsistent state
+	 * if you try to stop it while it's busy translating handles.
+	 * sometimes you get a CACHE_ERROR, sometimes it just fails
+	 * silently, sending incorrect instance offsets to PGRAPH after
+	 * it's started up again.
+	 *
+	 * to avoid this, we invalidate the most recently calculated
+	 * instance.
+	 */
+	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
+			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
+		nv_warn(priv, "timeout idling puller\n");
+
+	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+}
+
+void
+nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__releases(priv->base.lock)
+{
+	struct nv04_fifo_priv *priv = (void *)pfifo;
+	unsigned long flags = *pflags;
+
+	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+}
+
+static const char *
+nv_dma_state_err(u32 state)
+{
+	static const char * const desc[] = {
+		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+	};
+	return desc[(state >> 29) & 0x7];
+}
+
+static bool
+nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
+{
+	struct nv04_fifo_chan *chan = NULL;
+	struct nouveau_handle *bind;
+	const int subc = (addr >> 13) & 0x7;
+	const int mthd = addr & 0x1ffc;
+	bool handled = false;
+	unsigned long flags;
+	u32 engine;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	if (likely(chid >= priv->base.min && chid <= priv->base.max))
+		chan = (void *)priv->base.channel[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	switch (mthd) {
+	case 0x0000:
+		bind = nouveau_namedb_get(nv_namedb(chan), data);
+		if (unlikely(!bind))
+			break;
+
+		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
+			engine = 0x0000000f << (subc * 4);
+			chan->subc[subc] = data;
+			handled = true;
+
+			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
+		}
+
+		nouveau_namedb_put(bind);
+		break;
+	default:
+		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
+		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+			break;
+
+		bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
+		if (likely(bind)) {
+			if (!nv_call(bind->object, mthd, data))
+				handled = true;
+			nouveau_namedb_put(bind);
+		}
+		break;
+	}
+
+out:
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return handled;
+}
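+
+/* Example flow for the software-method path above: a pushbuffer that issues
+ * method 0x0000 on subchannel 5 with a handle resolving to an SW-engine
+ * object causes subc[5] to record the handle and clears PFIFO's engine bits
+ * for that subchannel; later methods on subchannel 5 then fault with
+ * CACHE_ERROR, land back here, and are forwarded to the bound object via
+ * nv_call().
+ */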
+
+void
+nv04_fifo_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_device *device = nv_device(subdev);
+	struct nv04_fifo_priv *priv = (void *)subdev;
+	uint32_t status, reassign;
+	int cnt = 0;
+
+	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
+	while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+		uint32_t chid, get;
+
+		nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+
+		chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+		get  = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
+
+		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+			uint32_t mthd, data;
+			int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.  Tests show that it
+			 * wraps around to the start at GET=0x800.  No clue
+			 * as to why.
+			 */
+			ptr = (get & 0x7ff) >> 2;
+
+			if (device->card_type < NV_40) {
+				mthd = nv_rd32(priv,
+					NV04_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(priv,
+					NV04_PFIFO_CACHE1_DATA(ptr));
+			} else {
+				mthd = nv_rd32(priv,
+					NV40_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(priv,
+					NV40_PFIFO_CACHE1_DATA(ptr));
+			}
+
+			if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
+				nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+					      "Mthd 0x%04x Data 0x%08x\n",
+					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+					data);
+			}
+
+			nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+			nv_wr32(priv, NV03_PFIFO_INTR_0,
+						NV_PFIFO_INTR_CACHE_ERROR);
+
+			nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+			nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
+			nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+
+			nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
+				nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+			nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+
+			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+		}
+
+		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+			u32 dma_get = nv_rd32(priv, 0x003244);
+			u32 dma_put = nv_rd32(priv, 0x003240);
+			u32 push = nv_rd32(priv, 0x003220);
+			u32 state = nv_rd32(priv, 0x003228);
+
+			if (device->card_type == NV_50) {
+				u32 ho_get = nv_rd32(priv, 0x003328);
+				u32 ho_put = nv_rd32(priv, 0x003320);
+				u32 ib_get = nv_rd32(priv, 0x003334);
+				u32 ib_put = nv_rd32(priv, 0x003330);
+
+				nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+				     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+				     "State 0x%08x (err: %s) Push 0x%08x\n",
+					chid, ho_get, dma_get, ho_put,
+					dma_put, ib_get, ib_put, state,
+					nv_dma_state_err(state),
+					push);
+
+				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+				nv_wr32(priv, 0x003364, 0x00000000);
+				if (dma_get != dma_put || ho_get != ho_put) {
+					nv_wr32(priv, 0x003244, dma_put);
+					nv_wr32(priv, 0x003328, ho_put);
+				} else
+				if (ib_get != ib_put) {
+					nv_wr32(priv, 0x003334, ib_put);
+				}
+			} else {
+				nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+					     "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+					chid, dma_get, dma_put, state,
+					nv_dma_state_err(state), push);
+
+				if (dma_get != dma_put)
+					nv_wr32(priv, 0x003244, dma_put);
+			}
+
+			nv_wr32(priv, 0x003228, 0x00000000);
+			nv_wr32(priv, 0x003220, 0x00000001);
+			nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+		}
+
+		if (status & NV_PFIFO_INTR_SEMAPHORE) {
+			uint32_t sem;
+
+			status &= ~NV_PFIFO_INTR_SEMAPHORE;
+			nv_wr32(priv, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_SEMAPHORE);
+
+			sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
+			nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+			nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+		}
+
+		if (device->card_type == NV_50) {
+			if (status & 0x00000010) {
+				nv50_fb_trap(nouveau_fb(priv), 1);
+				status &= ~0x00000010;
+				nv_wr32(priv, 0x002100, 0x00000010);
+			}
+		}
+
+		if (status) {
+			nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+				status, chid);
+			nv_wr32(priv, NV03_PFIFO_INTR_0, status);
+			status = 0;
+		}
+
+		nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
+	}
+
+	if (status) {
+		nv_info(priv, "still angry after %d spins, halt\n", cnt);
+		nv_wr32(priv, 0x002140, 0);
+		nv_wr32(priv, 0x000140, 0);
+	}
+
+	nv_wr32(priv, 0x000100, 0x00000100);
+}
+
+static int
+nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nouveau_ramht_ref(imem->ramht, &priv->ramht);
+	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv04_fifo_cclass;
+	nv_engine(priv)->sclass = nv04_fifo_sclass;
+	priv->base.pause = nv04_fifo_pause;
+	priv->base.start = nv04_fifo_start;
+	priv->ramfc_desc = nv04_ramfc;
+	return 0;
+}
+
+void
+nv04_fifo_dtor(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->ramfc);
+	nouveau_gpuobj_ref(NULL, &priv->ramro);
+	nouveau_ramht_ref(NULL, &priv->ramht);
+	nouveau_fifo_destroy(&priv->base);
+}
+
+int
+nv04_fifo_init(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((priv->ramht->bits - 9) << 16) |
+				        (priv->ramht->base.addr >> 8));
+	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	return 0;
+}
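+
+/* The NV03_PFIFO_RAMHT value written above packs the search stride, hash
+ * table size and instance address: a hypothetical RAMHT with ->bits = 9 at
+ * instance address 0x10000, for example, gives
+ * (0x03 << 24) | (0 << 16) | 0x100 = 0x03000100.
+ */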
+
+struct nouveau_oclass
+nv04_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_ctor,
+		.dtor = nv04_fifo_dtor,
+		.init = nv04_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
new file mode 100644
index 000000000000..496a4b4fdfaf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
@@ -0,0 +1,178 @@
+#ifndef __NV04_FIFO_H__
+#define __NV04_FIFO_H__
+
+#include <engine/fifo.h>
+
+#define NV04_PFIFO_DELAY_0                                 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
+#define NV03_PFIFO_INTR_0                                  0x00002100
+#define NV03_PFIFO_INTR_EN_0                               0x00002140
+#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
+#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
+#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
+#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
+#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
+#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
+#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
+#define NV03_PFIFO_RAMHT                                   0x00002210
+#define NV03_PFIFO_RAMFC                                   0x00002214
+#define NV03_PFIFO_RAMRO                                   0x00002218
+#define NV40_PFIFO_RAMFC                                   0x00002220
+#define NV03_PFIFO_CACHES                                  0x00002500
+#define NV04_PFIFO_MODE                                    0x00002504
+#define NV04_PFIFO_DMA                                     0x00002508
+#define NV04_PFIFO_SIZE                                    0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE                                128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
+#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
+#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
+#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
+#define NV03_PFIFO_CACHE1_PUT                              0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
+#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
+#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
+#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
+#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
+#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
+#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
+#define NV04_PFIFO_CACHE1_HASH                             0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
+#define NV03_PFIFO_CACHE1_GET                              0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
+#define NV40_PFIFO_UNK32E4                                 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
+
+struct ramfc_desc {
+	unsigned bits:6;
+	unsigned ctxs:5;
+	unsigned ctxp:8;
+	unsigned regs:5;
+	unsigned regp;
+};
+
+struct nv04_fifo_priv {
+	struct nouveau_fifo base;
+	struct ramfc_desc *ramfc_desc;
+	struct nouveau_ramht  *ramht;
+	struct nouveau_gpuobj *ramro;
+	struct nouveau_gpuobj *ramfc;
+};
+
+struct nv04_fifo_base {
+	struct nouveau_fifo_base base;
+};
+
+struct nv04_fifo_chan {
+	struct nouveau_fifo_chan base;
+	u32 subc[8];
+	u32 ramfc;
+};
+
+int  nv04_fifo_object_attach(struct nouveau_object *,
+			     struct nouveau_object *, u32);
+void nv04_fifo_object_detach(struct nouveau_object *, int);
+
+void nv04_fifo_chan_dtor(struct nouveau_object *);
+int  nv04_fifo_chan_init(struct nouveau_object *);
+int  nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);
+
+int  nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, void *, u32,
+			    struct nouveau_object **);
+
+void nv04_fifo_dtor(struct nouveau_object *);
+int  nv04_fifo_init(struct nouveau_object *);
+void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
+void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
new file mode 100644
index 000000000000..4ba75422b89d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv10_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
+	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv10_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv04_fifo_priv *priv = (void *)engine;
+	struct nv04_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+					  0x10000, args->pushbuf,
+					  (1 << NVDEV_ENGINE_DMAOBJ) |
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+	chan->ramfc = chan->base.chid * 32;
+
+	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
+			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv10_fifo_ofuncs = {
+	.ctor = nv10_fifo_chan_ctor,
+	.dtor = nv04_fifo_chan_dtor,
+	.init = nv04_fifo_chan_init,
+	.fini = nv04_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv10_fifo_sclass[] = {
+	{ NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv10_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_context_ctor,
+		.dtor = _nouveau_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nouveau_ramht_ref(imem->ramht, &priv->ramht);
+	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv10_fifo_cclass;
+	nv_engine(priv)->sclass = nv10_fifo_sclass;
+	priv->base.pause = nv04_fifo_pause;
+	priv->base.start = nv04_fifo_start;
+	priv->ramfc_desc = nv10_ramfc;
+	return 0;
+}
+
+struct nouveau_oclass
+nv10_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_fifo_ctor,
+		.dtor = nv04_fifo_dtor,
+		.init = nv04_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
new file mode 100644
index 000000000000..b96e6b0ae2b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv17_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
+	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{ 32,  0, 0x20,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
+	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv17_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv04_fifo_priv *priv = (void *)engine;
+	struct nv04_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+					  0x10000, args->pushbuf,
+					  (1 << NVDEV_ENGINE_DMAOBJ) |
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR) |
+					  (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+					  &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+	chan->ramfc = chan->base.chid * 64;
+
+	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
+			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv17_fifo_ofuncs = {
+	.ctor = nv17_fifo_chan_ctor,
+	.dtor = nv04_fifo_chan_dtor,
+	.init = nv04_fifo_chan_init,
+	.fini = nv04_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv17_fifo_sclass[] = {
+	{ NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv17_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x17),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_context_ctor,
+		.dtor = _nouveau_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nouveau_ramht_ref(imem->ramht, &priv->ramht);
+	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv17_fifo_cclass;
+	nv_engine(priv)->sclass = nv17_fifo_sclass;
+	priv->base.pause = nv04_fifo_pause;
+	priv->base.start = nv04_fifo_start;
+	priv->ramfc_desc = nv17_ramfc;
+	return 0;
+}
+
+static int
+nv17_fifo_init(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((priv->ramht->bits - 9) << 16) |
+				        (priv->ramht->base.addr >> 8));
+	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	return 0;
+}
+
+struct nouveau_oclass
+nv17_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x17),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv17_fifo_ctor,
+		.dtor = nv04_fifo_dtor,
+		.init = nv17_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
new file mode 100644
index 000000000000..559c3b4e1b86
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
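+/* RAMFC layout for NV40: each entry maps one field of the per-channel
+ * RAMFC context (width, shifts, RAMFC offset) onto the PFIFO register it
+ * mirrors, so channel state can be saved and restored on a switch.
+ */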
+static struct ramfc_desc
+nv40_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
+	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 28,  0, 0x18,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{  2, 28, 0x18, 28, 0x002058 },
+	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x20,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
+	{ 32,  0, 0x34,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+	{ 32,  0, 0x38,  0, NV40_PFIFO_GRCTX_INSTANCE },
+	{ 17,  0, 0x3c,  0, NV04_PFIFO_DMA_TIMESLICE },
+	{ 32,  0, 0x40,  0, 0x0032e4 },
+	{ 32,  0, 0x44,  0, 0x0032e8 },
+	{ 32,  0, 0x4c,  0, 0x002088 },
+	{ 32,  0, 0x50,  0, 0x003300 },
+	{ 32,  0, 0x54,  0, 0x00330c },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv40_fifo_object_attach(struct nouveau_object *parent,
+			struct nouveau_object *object, u32 handle)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	struct nv04_fifo_chan *chan = (void *)parent;
+	u32 context, chid = chan->base.chid;
+	int ret;
+
+	if (nv_iclass(object, NV_GPUOBJ_CLASS))
+		context = nv_gpuobj(object)->addr >> 4;
+	else
+		context = 0x00000004; /* just non-zero */
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW:
+		context |= 0x00000000;
+		break;
+	case NVDEV_ENGINE_GR:
+		context |= 0x00100000;
+		break;
+	case NVDEV_ENGINE_MPEG:
+		context |= 0x00200000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	context |= chid << 23;
+
+	mutex_lock(&nv_subdev(priv)->mutex);
+	ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+	return ret;
+}
+
+static int
+nv40_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *engctx)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	struct nv04_fifo_chan *chan = (void *)parent;
+	unsigned long flags;
+	u32 reg, ctx;
+
+	switch (nv_engidx(engctx->engine)) {
+	case NVDEV_ENGINE_SW:
+		return 0;
+	case NVDEV_ENGINE_GR:
+		reg = 0x32e0;
+		ctx = 0x38;
+		break;
+	case NVDEV_ENGINE_MPEG:
+		reg = 0x330c;
+		ctx = 0x54;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
+	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
+
+	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
+		nv_wr32(priv, reg, nv_engctx(engctx)->addr);
+	nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
+
+	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return 0;
+}
+
+static int
+nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *engctx)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	struct nv04_fifo_chan *chan = (void *)parent;
+	unsigned long flags;
+	u32 reg, ctx;
+
+	switch (nv_engidx(engctx->engine)) {
+	case NVDEV_ENGINE_SW:
+		return 0;
+	case NVDEV_ENGINE_GR:
+		reg = 0x32e0;
+		ctx = 0x38;
+		break;
+	case NVDEV_ENGINE_MPEG:
+		reg = 0x330c;
+		ctx = 0x54;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
+
+	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
+		nv_wr32(priv, reg, 0x00000000);
+	nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
+
+	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return 0;
+}
+
+static int
+nv40_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv04_fifo_priv *priv = (void *)engine;
+	struct nv04_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x1000, args->pushbuf,
+					  (1 << NVDEV_ENGINE_DMAOBJ) |
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR) |
+					  (1 << NVDEV_ENGINE_MPEG), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+	chan->ramfc = chan->base.chid * 128;
+
+	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
+			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv40_fifo_ofuncs = {
+	.ctor = nv40_fifo_chan_ctor,
+	.dtor = nv04_fifo_chan_dtor,
+	.init = nv04_fifo_chan_init,
+	.fini = nv04_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv40_fifo_sclass[] = {
+	{ NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv40_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_context_ctor,
+		.dtor = _nouveau_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nouveau_ramht_ref(imem->ramht, &priv->ramht);
+	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv40_fifo_cclass;
+	nv_engine(priv)->sclass = nv40_fifo_sclass;
+	priv->base.pause = nv04_fifo_pause;
+	priv->base.start = nv04_fifo_start;
+	priv->ramfc_desc = nv40_ramfc;
+	return 0;
+}
+
+static int
+nv40_fifo_init(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	int ret;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x002040, 0x000000ff);
+	nv_wr32(priv, 0x002044, 0x2101ffff);
+	nv_wr32(priv, 0x002058, 0x00000001);
+
+	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((priv->ramht->bits - 9) << 16) |
+				        (priv->ramht->base.addr >> 8));
+	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+
+	switch (nv_device(priv)->chipset) {
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		nv_wr32(priv, 0x002230, 0x00000001);
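+		/* fall through - these chipsets also need the 0x002220 setup */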
+	case 0x40:
+	case 0x41:
+	case 0x42:
+	case 0x43:
+	case 0x45:
+	case 0x48:
+		nv_wr32(priv, 0x002220, 0x00030002);
+		break;
+	default:
+		nv_wr32(priv, 0x002230, 0x00000000);
+		nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 +
+					 priv->ramfc->addr) >> 16) |
+					0x00030000);
+		break;
+	}
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_fifo_ctor,
+		.dtor = nv04_fifo_dtor,
+		.init = nv40_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
new file mode 100644
index 000000000000..536e7634a00d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+#include <core/class.h>
+#include <core/math.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
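+/* Rebuild the playlist in the currently idle buffer: scan the per-channel
+ * control registers for enabled channels, write their ids into the buffer,
+ * then hand it to PFIFO.
+ */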
+void
+nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nouveau_gpuobj *cur;
+	int i, p;
+
+	cur = priv->playlist[priv->cur_playlist];
+	priv->cur_playlist = !priv->cur_playlist;
+
+	for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
+		if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
+			nv_wo32(cur, p++ * 4, i);
+	}
+
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x0032f4, cur->addr >> 12);
+	nv_wr32(priv, 0x0032ec, p);
+	nv_wr32(priv, 0x002500, 0x00000101);
+}
+
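+/* Fill in this engine's slot in the channel's engine context table
+ * (base->eng) with the address range of the engine context object.
+ */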
+static int
+nv50_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent->parent;
+	struct nouveau_gpuobj *ectx = (void *)object;
+	u64 limit = ectx->addr + ectx->size - 1;
+	u64 start = ectx->addr;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
+	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	nv_wo32(base->eng, addr + 0x00, 0x00190000);
+	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+					upper_32_bits(start));
+	nv_wo32(base->eng, addr + 0x10, 0x00000000);
+	nv_wo32(base->eng, addr + 0x14, 0x00000000);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_priv *priv = (void *)parent->engine;
+	struct nv50_fifo_base *base = (void *)parent->parent;
+	struct nv50_fifo_chan *chan = (void *)parent;
+	u32 addr, me;
+	int ret = 0;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
+	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_wo32(base->eng, addr + 0x00, 0x00000000);
+	nv_wo32(base->eng, addr + 0x04, 0x00000000);
+	nv_wo32(base->eng, addr + 0x08, 0x00000000);
+	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+	nv_wo32(base->eng, addr + 0x10, 0x00000000);
+	nv_wo32(base->eng, addr + 0x14, 0x00000000);
+	bar->flush(bar);
+
+	/* HW bug workaround:
+	 *
+	 * PFIFO will hang forever if the connected engines don't report
+	 * that they've processed the context switch request.
+	 *
+	 * In order for the kickoff to work, we need to ensure all the
+	 * connected engines are in a state where they can answer.
+	 *
+	 * Newer chipsets don't seem to suffer from this issue, and there's
+	 * also an "ignore these engines" bitmask reg we can use if we hit
+	 * the issue there.
+	 */
+	me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
+
+	/* do the kickoff... */
+	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
+		nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+		if (suspend)
+			ret = -EBUSY;
+	}
+
+	nv_wr32(priv, 0x00b860, me);
+	return ret;
+}
+
+static int
+nv50_fifo_object_attach(struct nouveau_object *parent,
+			struct nouveau_object *object, u32 handle)
+{
+	struct nv50_fifo_chan *chan = (void *)parent;
+	u32 context;
+
+	if (nv_iclass(object, NV_GPUOBJ_CLASS))
+		context = nv_gpuobj(object)->node->offset >> 4;
+	else
+		context = 0x00000004; /* just non-zero */
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
+	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
+	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
+	default:
+		return -EINVAL;
+	}
+
+	return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
+
+void
+nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+	struct nv50_fifo_chan *chan = (void *)parent;
+	nouveau_ramht_remove(chan->ramht, cookie);
+}
+
+static int
+nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x2000, args->pushbuf,
+					  (1 << NVDEV_ENGINE_DMAOBJ) |
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR) |
+					  (1 << NVDEV_ENGINE_MPEG), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+	ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+	if (ret)
+		return ret;
+
+	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
+	nv_wo32(base->ramfc, 0x44, 0x01003fff);
+	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
+	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+	nv_wo32(base->ramfc, 0x78, 0x00000000);
+	nv_wo32(base->ramfc, 0x7c, 0x30000001);
+	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->base.node->offset >> 4));
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv50_channel_ind_class *args = data;
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo_chan *chan;
+	u64 ioffset, ilength;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x2000, args->pushbuf,
+					  (1 << NVDEV_ENGINE_DMAOBJ) |
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR) |
+					  (1 << NVDEV_ENGINE_MPEG), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+	ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+	if (ret)
+		return ret;
+
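+	/* indirect (ib) pushbuffer: base address plus ring size encoded
+	 * as log2 of the number of 8-byte entries
+	 */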
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+	nv_wo32(base->ramfc, 0x44, 0x01003fff);
+	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+	nv_wo32(base->ramfc, 0x78, 0x00000000);
+	nv_wo32(base->ramfc, 0x7c, 0x30000001);
+	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->base.node->offset >> 4));
+	bar->flush(bar);
+	return 0;
+}
+
+void
+nv50_fifo_chan_dtor(struct nouveau_object *object)
+{
+	struct nv50_fifo_chan *chan = (void *)object;
+	nouveau_ramht_ref(NULL, &chan->ramht);
+	nouveau_fifo_channel_destroy(&chan->base);
+}
+
+static int
+nv50_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nv50_fifo_priv *priv = (void *)object->engine;
+	struct nv50_fifo_base *base = (void *)object->parent;
+	struct nv50_fifo_chan *chan = (void *)object;
+	struct nouveau_gpuobj *ramfc = base->ramfc;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
+	nv50_fifo_playlist_update(priv);
+	return 0;
+}
+
+int
+nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_fifo_priv *priv = (void *)object->engine;
+	struct nv50_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+
+	/* remove channel from playlist, fifo will unload context */
+	nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+	nv50_fifo_playlist_update(priv);
+	nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nv50_fifo_ofuncs_dma = {
+	.ctor = nv50_fifo_chan_ctor_dma,
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv50_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_ofuncs
+nv50_fifo_ofuncs_ind = {
+	.ctor = nv50_fifo_chan_ctor_ind,
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv50_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv50_fifo_sclass[] = {
+	{ NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma },
+	{ NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static int
+nv50_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv50_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+				          0x1000, NVOBJ_FLAG_HEAP, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
+				&base->pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+nv50_fifo_context_dtor(struct nouveau_object *object)
+{
+	struct nv50_fifo_base *base = (void *)object;
+	nouveau_vm_ref(NULL, &base->vm, base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->eng);
+	nouveau_gpuobj_ref(NULL, &base->ramfc);
+	nouveau_gpuobj_ref(NULL, &base->cache);
+	nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nv50_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fifo_context_ctor,
+		.dtor = nv50_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+				&priv->playlist[0]);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+				&priv->playlist[1]);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv50_fifo_cclass;
+	nv_engine(priv)->sclass = nv50_fifo_sclass;
+	return 0;
+}
+
+void
+nv50_fifo_dtor(struct nouveau_object *object)
+{
+	struct nv50_fifo_priv *priv = (void *)object;
+
+	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+	nouveau_fifo_destroy(&priv->base);
+}
+
+int
+nv50_fifo_init(struct nouveau_object *object)
+{
+	struct nv50_fifo_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
+	nv_wr32(priv, 0x002044, 0x01003fff);
+
+	nv_wr32(priv, 0x002100, 0xffffffff);
+	nv_wr32(priv, 0x002140, 0xffffffff);
+
+	for (i = 0; i < 128; i++)
+		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
+	nv50_fifo_playlist_update(priv);
+
+	nv_wr32(priv, 0x003200, 0x00000001);
+	nv_wr32(priv, 0x003250, 0x00000001);
+	nv_wr32(priv, 0x002500, 0x00000001);
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fifo_ctor,
+		.dtor = nv50_fifo_dtor,
+		.init = nv50_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
new file mode 100644
index 000000000000..3a9ceb315c20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
@@ -0,0 +1,36 @@
+#ifndef __NV50_FIFO_H__
+#define __NV50_FIFO_H__
+
+struct nv50_fifo_priv {
+	struct nouveau_fifo base;
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+};
+
+struct nv50_fifo_base {
+	struct nouveau_fifo_base base;
+	struct nouveau_gpuobj *ramfc;
+	struct nouveau_gpuobj *cache;
+	struct nouveau_gpuobj *eng;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *vm;
+};
+
+struct nv50_fifo_chan {
+	struct nouveau_fifo_chan base;
+	u32 subc[8];
+	struct nouveau_ramht *ramht;
+};
+
+void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
+
+void nv50_fifo_object_detach(struct nouveau_object *, int);
+void nv50_fifo_chan_dtor(struct nouveau_object *);
+int  nv50_fifo_chan_fini(struct nouveau_object *, bool);
+
+void nv50_fifo_context_dtor(struct nouveau_object *);
+
+void nv50_fifo_dtor(struct nouveau_object *);
+int  nv50_fifo_init(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
new file mode 100644
index 000000000000..b4fd26d8f166
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+#include <core/class.h>
+#include <core/math.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv84_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent->parent;
+	struct nouveau_gpuobj *ectx = (void *)object;
+	u64 limit = ectx->addr + ectx->size - 1;
+	u64 start = ectx->addr;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0020; break;
+	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+	case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
+	case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	nv_wo32(base->eng, addr + 0x00, 0x00190000);
+	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+					upper_32_bits(start));
+	nv_wo32(base->eng, addr + 0x10, 0x00000000);
+	nv_wo32(base->eng, addr + 0x14, 0x00000000);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_priv *priv = (void *)parent->engine;
+	struct nv50_fifo_base *base = (void *)parent->parent;
+	struct nv50_fifo_chan *chan = (void *)parent;
+	u32 addr, save, engn;
+	bool done;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : engn = 0; addr = 0x0020; break;
+	case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
+	case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
+	case NVDEV_ENGINE_COPY0: engn = 2; addr = 0x00c0; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_wo32(base->eng, addr + 0x00, 0x00000000);
+	nv_wo32(base->eng, addr + 0x04, 0x00000000);
+	nv_wo32(base->eng, addr + 0x08, 0x00000000);
+	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+	nv_wo32(base->eng, addr + 0x10, 0x00000000);
+	nv_wo32(base->eng, addr + 0x14, 0x00000000);
+	bar->flush(bar);
+
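+	/* restrict the forced context unload to this engine only, kick it
+	 * off, and wait for completion before restoring the engine mask
+	 */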
+	save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
+	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+	done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
+	nv_wr32(priv, 0x002520, save);
+	if (!done) {
+		nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+		if (suspend)
+			return -EBUSY;
+	}
+	return 0;
+}
+
+static int
+nv84_fifo_object_attach(struct nouveau_object *parent,
+			struct nouveau_object *object, u32 handle)
+{
+	struct nv50_fifo_chan *chan = (void *)parent;
+	u32 context;
+
+	if (nv_iclass(object, NV_GPUOBJ_CLASS))
+		context = nv_gpuobj(object)->node->offset >> 4;
+	else
+		context = 0x00000004; /* just non-zero */
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
+	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
+	case NVDEV_ENGINE_MPEG  :
+	case NVDEV_ENGINE_PPP   : context |= 0x00200000; break;
+	case NVDEV_ENGINE_ME    :
+	case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
+	case NVDEV_ENGINE_VP    : context |= 0x00400000; break;
+	case NVDEV_ENGINE_CRYPT :
+	case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
+	case NVDEV_ENGINE_BSP   : context |= 0x00600000; break;
+	default:
+		return -EINVAL;
+	}
+
+	return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
+
+static int
+nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x2000, args->pushbuf,
+					  (1 << NVDEV_ENGINE_DMAOBJ) |
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR) |
+					  (1 << NVDEV_ENGINE_MPEG) |
+					  (1 << NVDEV_ENGINE_ME) |
+					  (1 << NVDEV_ENGINE_VP) |
+					  (1 << NVDEV_ENGINE_CRYPT) |
+					  (1 << NVDEV_ENGINE_BSP) |
+					  (1 << NVDEV_ENGINE_PPP) |
+					  (1 << NVDEV_ENGINE_COPY0) |
+					  (1 << NVDEV_ENGINE_UNK1C1), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv84_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv84_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
+	nv_wo32(base->ramfc, 0x44, 0x01003fff);
+	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
+	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+	nv_wo32(base->ramfc, 0x78, 0x00000000);
+	nv_wo32(base->ramfc, 0x7c, 0x30000001);
+	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->base.node->offset >> 4));
+	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo_chan *chan;
+	struct nv50_channel_ind_class *args = data;
+	u64 ioffset, ilength;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x2000, args->pushbuf,
+					  (1 << NVDEV_ENGINE_DMAOBJ) |
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR) |
+					  (1 << NVDEV_ENGINE_MPEG) |
+					  (1 << NVDEV_ENGINE_ME) |
+					  (1 << NVDEV_ENGINE_VP) |
+					  (1 << NVDEV_ENGINE_CRYPT) |
+					  (1 << NVDEV_ENGINE_BSP) |
+					  (1 << NVDEV_ENGINE_PPP) |
+					  (1 << NVDEV_ENGINE_COPY0) |
+					  (1 << NVDEV_ENGINE_UNK1C1), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv84_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv84_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+	nv_wo32(base->ramfc, 0x44, 0x01003fff);
+	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+	nv_wo32(base->ramfc, 0x78, 0x00000000);
+	nv_wo32(base->ramfc, 0x7c, 0x30000001);
+	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->base.node->offset >> 4));
+	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv84_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nv50_fifo_priv *priv = (void *)object->engine;
+	struct nv50_fifo_base *base = (void *)object->parent;
+	struct nv50_fifo_chan *chan = (void *)object;
+	struct nouveau_gpuobj *ramfc = base->ramfc;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
+	nv50_fifo_playlist_update(priv);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv84_fifo_ofuncs_dma = {
+	.ctor = nv84_fifo_chan_ctor_dma,
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv84_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_ofuncs
+nv84_fifo_ofuncs_ind = {
+	.ctor = nv84_fifo_chan_ctor_ind,
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv84_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv84_fifo_sclass[] = {
+	{ NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma },
+	{ NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static int
+nv84_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv50_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+				          0x1000, NVOBJ_FLAG_HEAP, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
+				 0, &base->pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
+				 NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
+				 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv84_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_fifo_context_ctor,
+		.dtor = nv50_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+				&priv->playlist[0]);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+				&priv->playlist[1]);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv84_fifo_cclass;
+	nv_engine(priv)->sclass = nv84_fifo_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_fifo_ctor,
+		.dtor = nv50_fifo_dtor,
+		.init = nv50_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
new file mode 100644
index 000000000000..6f21be600557
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+struct nvc0_fifo_priv {
+	struct nouveau_fifo base;
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+struct nvc0_fifo_base {
+	struct nouveau_fifo_base base;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *vm;
+};
+
+struct nvc0_fifo_chan {
+	struct nouveau_fifo_chan base;
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
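+/* Rebuild the playlist in the currently idle buffer from the per-channel
+ * enable bits, point PFIFO at it, and wait for the update to complete.
+ */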
+static void
+nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nouveau_gpuobj *cur;
+	int i, p;
+
+	cur = priv->playlist[priv->cur_playlist];
+	priv->cur_playlist = !priv->cur_playlist;
+
+	for (i = 0, p = 0; i < 128; i++) {
+		if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
+			continue;
+		nv_wo32(cur, p + 0, i);
+		nv_wo32(cur, p + 4, 0x00000004);
+		p += 8;
+	}
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x002270, cur->addr >> 12);
+	nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
+	if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
+		nv_error(priv, "playlist update failed\n");
+}
+
+static int
+nvc0_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nvc0_fifo_base *base = (void *)parent->parent;
+	struct nouveau_engctx *ectx = (void *)object;
+	u32 addr;
+	int ret;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
+	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!ectx->vma.node) {
+		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+					    NV_MEM_ACCESS_RW, &ectx->vma);
+		if (ret)
+			return ret;
+
+		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	}
+
+	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nvc0_fifo_priv *priv = (void *)parent->engine;
+	struct nvc0_fifo_base *base = (void *)parent->parent;
+	struct nvc0_fifo_chan *chan = (void *)parent;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
+	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_wo32(base, addr + 0x00, 0x00000000);
+	nv_wo32(base, addr + 0x04, 0x00000000);
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+		nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+		if (suspend)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nvc0_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nvc0_fifo_priv *priv = (void *)engine;
+	struct nvc0_fifo_base *base = (void *)parent;
+	struct nvc0_fifo_chan *chan;
+	struct nv50_channel_ind_class *args = data;
+	u64 usermem, ioffset, ilength;
+	int ret, i;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+					  priv->user.bar.offset, 0x1000,
+					  args->pushbuf,
+					  (1 << NVDEV_ENGINE_SW) |
+					  (1 << NVDEV_ENGINE_GR) |
+					  (1 << NVDEV_ENGINE_COPY0) |
+					  (1 << NVDEV_ENGINE_COPY1), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
+	nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
+
+	usermem = chan->base.chid * 0x1000;
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	for (i = 0; i < 0x1000; i += 4)
+		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
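+	/* channel instance block: per-channel user area address, indirect
+	 * (ib) pushbuffer base/size, and fixed initialisation values
+	 */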
+	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x10, 0x0000face);
+	nv_wo32(base, 0x30, 0xfffff902);
+	nv_wo32(base, 0x48, lower_32_bits(ioffset));
+	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base, 0x54, 0x00000002);
+	nv_wo32(base, 0x84, 0x20400000);
+	nv_wo32(base, 0x94, 0x30000001);
+	nv_wo32(base, 0x9c, 0x00000100);
+	nv_wo32(base, 0xa4, 0x1f1f1f1f);
+	nv_wo32(base, 0xa8, 0x1f1f1f1f);
+	nv_wo32(base, 0xac, 0x0000001f);
+	nv_wo32(base, 0xb8, 0xf8000000);
+	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nvc0_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+	struct nvc0_fifo_priv *priv = (void *)object->engine;
+	struct nvc0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
+	nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
+	nvc0_fifo_playlist_update(priv);
+	return 0;
+}
+
+static int
+nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nvc0_fifo_priv *priv = (void *)object->engine;
+	struct nvc0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+
+	nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
+	nvc0_fifo_playlist_update(priv);
+	nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nvc0_fifo_ofuncs = {
+	.ctor = nvc0_fifo_chan_ctor,
+	.dtor = _nouveau_fifo_channel_dtor,
+	.init = nvc0_fifo_chan_init,
+	.fini = nvc0_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nvc0_fifo_sclass[] = {
+	{ NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
+
+static int
+nvc0_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nvc0_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+				          0x1000, NVOBJ_FLAG_ZERO_ALLOC |
+					  NVOBJ_FLAG_HEAP, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+	if (ret)
+		return ret;
+
+	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0208, 0xffffffff);
+	nv_wo32(base, 0x020c, 0x000000ff);
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nvc0_fifo_context_dtor(struct nouveau_object *object)
+{
+	struct nvc0_fifo_base *base = (void *)object;
+	nouveau_vm_ref(NULL, &base->vm, base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->pgd);
+	nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nvc0_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_fifo_context_ctor,
+		.dtor = nvc0_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
+	{ 0x00, "PGRAPH" },
+	{ 0x03, "PEEPHOLE" },
+	{ 0x04, "BAR1" },
+	{ 0x05, "BAR3" },
+	{ 0x07, "PFIFO" },
+	{ 0x10, "PBSP" },
+	{ 0x11, "PPPP" },
+	{ 0x13, "PCOUNTER" },
+	{ 0x14, "PVP" },
+	{ 0x15, "PCOPY0" },
+	{ 0x16, "PCOPY1" },
+	{ 0x17, "PDAEMON" },
+	{}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_reason[] = {
+	{ 0x00, "PT_NOT_PRESENT" },
+	{ 0x01, "PT_TOO_SHORT" },
+	{ 0x02, "PAGE_NOT_PRESENT" },
+	{ 0x03, "VM_LIMIT_EXCEEDED" },
+	{ 0x04, "NO_CHANNEL" },
+	{ 0x05, "PAGE_SYSTEM_ONLY" },
+	{ 0x06, "PAGE_READ_ONLY" },
+	{ 0x0a, "COMPRESSED_SYSRAM" },
+	{ 0x0c, "INVALID_STORAGE_TYPE" },
+	{}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
+	{ 0x01, "PCOPY0" },
+	{ 0x02, "PCOPY1" },
+	{ 0x04, "DISPATCH" },
+	{ 0x05, "CTXCTL" },
+	{ 0x06, "PFIFO" },
+	{ 0x07, "BAR_READ" },
+	{ 0x08, "BAR_WRITE" },
+	{ 0x0b, "PVP" },
+	{ 0x0c, "PPPP" },
+	{ 0x0d, "PBSP" },
+	{ 0x11, "PCOUNTER" },
+	{ 0x12, "PDAEMON" },
+	{ 0x14, "CCACHE" },
+	{ 0x15, "CCACHE_POST" },
+	{}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
+	{ 0x01, "TEX" },
+	{ 0x0c, "ESETUP" },
+	{ 0x0e, "CTXCTL" },
+	{ 0x0f, "PROP" },
+	{}
+};
+
+static const struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/*	{ 0x00008000, "" }	seen with null ib push */
+	{ 0x00200000, "ILLEGAL_MTHD" },
+	{ 0x00800000, "EMPTY_SUBC" },
+	{}
+};
+
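+/* Decode a VM fault reported by the given unit: read the fault address,
+ * instance and status registers and log the reason, source and client.
+ */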
+static void
+nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
+{
+	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
+	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
+	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
+	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	u32 client = (stat & 0x00001f00) >> 8;
+
+	switch (unit) {
+	case 3: /* PEEPHOLE */
+		nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+		break;
+	case 4: /* BAR1 */
+		nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+		break;
+	case 5: /* BAR3 */
+		nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+		break;
+	default:
+		break;
+	}
+
+	nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
+		 "write" : "read", (u64)vahi << 32 | valo);
+	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+	printk("] from ");
+	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+	if (stat & 0x00000040) {
+		printk("/");
+		nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
+	} else {
+		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+		nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
+	}
+	printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
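+/* Try to handle a software method: look up the software object (class
+ * 0x906e) bound to the channel and forward the method call to it.
+ */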
+static int
+nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+	struct nvc0_fifo_chan *chan = NULL;
+	struct nouveau_handle *bind;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	if (likely(chid >= priv->base.min && chid <= priv->base.max))
+		chan = (void *)priv->base.channel[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+	if (likely(bind)) {
+		if (!mthd || !nv_call(bind->object, mthd, data))
+			ret = 0;
+		nouveau_namedb_put(bind);
+	}
+
+out:
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return ret;
+}
+
+static void
+nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
+{
+	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00003ffc);
+	u32 show = stat;
+
+	if (stat & 0x00200000) {
+		if (mthd == 0x0054) {
+			if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
+				show &= ~0x00200000;
+		}
+	}
+
+	if (stat & 0x00800000) {
+		if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
+			show &= ~0x00800000;
+	}
+
+	if (show) {
+		nv_error(priv, "SUBFIFO%d:", unit);
+		nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
+		printk("\n");
+		nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
+			       "data 0x%08x\n",
+			 unit, chid, subc, mthd, data);
+	}
+
+	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nvc0_fifo_intr(struct nouveau_subdev *subdev)
+{
+	struct nvc0_fifo_priv *priv = (void *)subdev;
+	u32 mask = nv_rd32(priv, 0x002140);
+	u32 stat = nv_rd32(priv, 0x002100) & mask;
+
+	if (stat & 0x00000100) {
+		nv_info(priv, "unknown status 0x00000100\n");
+		nv_wr32(priv, 0x002100, 0x00000100);
+		stat &= ~0x00000100;
+	}
+
+	if (stat & 0x10000000) {
+		u32 units = nv_rd32(priv, 0x00259c);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nvc0_fifo_isr_vm_fault(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x00259c, units);
+		stat &= ~0x10000000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 units = nv_rd32(priv, 0x0025a0);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nvc0_fifo_isr_subfifo_intr(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x0025a0, units);
+		stat &= ~0x20000000;
+	}
+
+	if (stat & 0x40000000) {
+		nv_warn(priv, "unknown status 0x40000000\n");
+		nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+		stat &= ~0x40000000;
+	}
+
+	if (stat) {
+		nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+		nv_wr32(priv, 0x002100, stat);
+		nv_wr32(priv, 0x002140, 0);
+	}
+}
+
+static int
+nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nvc0_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+				&priv->playlist[0]);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+				&priv->playlist[1]);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
+				&priv->user.mem);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+				&priv->user.bar);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nvc0_fifo_intr;
+	nv_engine(priv)->cclass = &nvc0_fifo_cclass;
+	nv_engine(priv)->sclass = nvc0_fifo_sclass;
+	return 0;
+}
+
+static void
+nvc0_fifo_dtor(struct nouveau_object *object)
+{
+	struct nvc0_fifo_priv *priv = (void *)object;
+
+	nouveau_gpuobj_unmap(&priv->user.bar);
+	nouveau_gpuobj_ref(NULL, &priv->user.mem);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+	nouveau_fifo_destroy(&priv->base);
+}
+
+static int
+nvc0_fifo_init(struct nouveau_object *object)
+{
+	struct nvc0_fifo_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x000204, 0xffffffff);
+	nv_wr32(priv, 0x002204, 0xffffffff);
+
+	priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
+	nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+
+	/* assign engines to subfifos */
+	if (priv->spoon_nr >= 3) {
+		nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
+		nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
+		nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
+		nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
+		nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
+		nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+	}
+
+	/* PSUBFIFO[n] */
+	for (i = 0; i < priv->spoon_nr; i++) {
+		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	}
+
+	nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
+	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+	nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+	nv_wr32(priv, 0x002100, 0xffffffff);
+	nv_wr32(priv, 0x002140, 0xbfffffff);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_fifo_ctor,
+		.dtor = nvc0_fifo_dtor,
+		.init = nvc0_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
new file mode 100644
index 000000000000..36e81b6fafbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -0,0 +1,628 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#define _(a,b) { (a), ((1 << (a)) | (b)) }
+static const struct {
+	int subdev;
+	u32 mask;
+} fifo_engine[] = {
+	_(NVDEV_ENGINE_GR      , (1 << NVDEV_ENGINE_SW)),
+	_(NVDEV_ENGINE_VP      , 0),
+	_(NVDEV_ENGINE_PPP     , 0),
+	_(NVDEV_ENGINE_BSP     , 0),
+	_(NVDEV_ENGINE_COPY0   , 0),
+	_(NVDEV_ENGINE_COPY1   , 0),
+	_(NVDEV_ENGINE_VENC    , 0),
+};
+#undef _
+#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
+
+struct nve0_fifo_engn {
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+	struct nouveau_fifo base;
+	struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+struct nve0_fifo_base {
+	struct nouveau_fifo_base base;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *vm;
+};
+
+struct nve0_fifo_chan {
+	struct nouveau_fifo_chan base;
+	u32 engine;
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static void
+nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nve0_fifo_engn *engn = &priv->engine[engine];
+	struct nouveau_gpuobj *cur;
+	u32 match = (engine << 16) | 0x00000001;
+	int i, p;
+
+	cur = engn->playlist[engn->cur_playlist];
+	if (unlikely(cur == NULL)) {
+		int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
+					     0x8000, 0x1000, 0, &cur);
+		if (ret) {
+			nv_error(priv, "playlist alloc failed\n");
+			return;
+		}
+
+		engn->playlist[engn->cur_playlist] = cur;
+	}
+
+	engn->cur_playlist = !engn->cur_playlist;
+
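+	/* add an entry for each channel currently active on this engine */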
+	for (i = 0, p = 0; i < priv->base.max; i++) {
+		u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
+		if (ctrl != match)
+			continue;
+		nv_wo32(cur, p + 0, i);
+		nv_wo32(cur, p + 4, 0x00000000);
+		p += 8;
+	}
+	bar->flush(bar);
+
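+	/* submit the new playlist and wait for PFIFO to accept it */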
+	nv_wr32(priv, 0x002270, cur->addr >> 12);
+	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
+	if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+		nv_error(priv, "playlist %d update timeout\n", engine);
+}
+
+static int
+nve0_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_base *base = (void *)parent->parent;
+	struct nouveau_engctx *ectx = (void *)object;
+	u32 addr;
+	int ret;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   :
+	case NVDEV_ENGINE_COPY0:
+	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!ectx->vma.node) {
+		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+					    NV_MEM_ACCESS_RW, &ectx->vma);
+		if (ret)
+			return ret;
+
+		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	}
+
+	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_priv *priv = (void *)parent->engine;
+	struct nve0_fifo_base *base = (void *)parent->parent;
+	struct nve0_fifo_chan *chan = (void *)parent;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   :
+	case NVDEV_ENGINE_COPY0:
+	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_wo32(base, addr + 0x00, 0x00000000);
+	nv_wo32(base, addr + 0x04, 0x00000000);
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+		nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+		if (suspend)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nve0_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_priv *priv = (void *)engine;
+	struct nve0_fifo_base *base = (void *)parent;
+	struct nve0_fifo_chan *chan;
+	struct nve0_channel_ind_class *args = data;
+	u64 usermem, ioffset, ilength;
+	int ret, i;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
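+	/* pick the first requested engine present on this device */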
+	for (i = 0; i < FIFO_ENGINE_NR; i++) {
+		if (args->engine & (1 << i)) {
+			if (nouveau_engine(parent, fifo_engine[i].subdev)) {
+				args->engine = (1 << i);
+				break;
+			}
+		}
+	}
+
+	if (i == FIFO_ENGINE_NR)
+		return -ENODEV;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+					  priv->user.bar.offset, 0x200,
+					  args->pushbuf,
+					  fifo_engine[i].mask, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nve0_fifo_context_attach;
+	nv_parent(chan)->context_detach = nve0_fifo_context_detach;
+	chan->engine = i;
+
+	usermem = chan->base.chid * 0x200;
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	for (i = 0; i < 0x200; i += 4)
+		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x10, 0x0000face);
+	nv_wo32(base, 0x30, 0xfffff902);
+	nv_wo32(base, 0x48, lower_32_bits(ioffset));
+	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base, 0x84, 0x20400000);
+	nv_wo32(base, 0x94, 0x30000001);
+	nv_wo32(base, 0x9c, 0x00000100);
+	nv_wo32(base, 0xac, 0x0000001f);
+	nv_wo32(base, 0xe8, chan->base.chid);
+	nv_wo32(base, 0xb8, 0xf8000000);
+	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nve0_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+	struct nve0_fifo_priv *priv = (void *)object->engine;
+	struct nve0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
+	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+	nve0_fifo_playlist_update(priv, chan->engine);
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+	return 0;
+}
+
+static int
+nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nve0_fifo_priv *priv = (void *)object->engine;
+	struct nve0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+	nve0_fifo_playlist_update(priv, chan->engine);
+	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nve0_fifo_ofuncs = {
+	.ctor = nve0_fifo_chan_ctor,
+	.dtor = _nouveau_fifo_channel_dtor,
+	.init = nve0_fifo_chan_init,
+	.fini = nve0_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nve0_fifo_sclass[] = {
+	{ NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
+
+static int
+nve0_fifo_context_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nve0_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+				          0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+	if (ret)
+		return ret;
+
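+	/* page directory address and vm limit for this channel */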
+	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0208, 0xffffffff);
+	nv_wo32(base, 0x020c, 0x000000ff);
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nve0_fifo_context_dtor(struct nouveau_object *object)
+{
+	struct nve0_fifo_base *base = (void *)object;
+	nouveau_vm_ref(NULL, &base->vm, base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->pgd);
+	nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nve0_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_fifo_context_ctor,
+		.dtor = nve0_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static const struct nouveau_enum nve0_fifo_fault_unit[] = {
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_reason[] = {
+	{ 0x00, "PT_NOT_PRESENT" },
+	{ 0x01, "PT_TOO_SHORT" },
+	{ 0x02, "PAGE_NOT_PRESENT" },
+	{ 0x03, "VM_LIMIT_EXCEEDED" },
+	{ 0x04, "NO_CHANNEL" },
+	{ 0x05, "PAGE_SYSTEM_ONLY" },
+	{ 0x06, "PAGE_READ_ONLY" },
+	{ 0x0a, "COMPRESSED_SYSRAM" },
+	{ 0x0c, "INVALID_STORAGE_TYPE" },
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+	{}
+};
+
+static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+	{ 0x00200000, "ILLEGAL_MTHD" },
+	{ 0x00800000, "EMPTY_SUBC" },
+	{}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
+{
+	u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
+	u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
+	u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
+	u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
+	u32 client = (stat & 0x00001f00) >> 8;
+
+	nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
+		       "write" : "read", (u64)vahi << 32 | valo);
+	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+	printk("] from ");
+	nouveau_enum_print(nve0_fifo_fault_unit, unit);
+	if (stat & 0x00000040) {
+		printk("/");
+		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+	} else {
+		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+	}
+	printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static int
+nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+	struct nve0_fifo_chan *chan = NULL;
+	struct nouveau_handle *bind;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	if (likely(chid >= priv->base.min && chid <= priv->base.max))
+		chan = (void *)priv->base.channel[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+	if (likely(bind)) {
+		if (!mthd || !nv_call(bind->object, mthd, data))
+			ret = 0;
+		nouveau_namedb_put(bind);
+	}
+
+out:
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return ret;
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
+{
+	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00003ffc);
+	u32 show = stat;
+
+	if (stat & 0x00200000) {
+		if (mthd == 0x0054) {
+			if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
+				show &= ~0x00200000;
+		}
+	}
+
+	if (stat & 0x00800000) {
+		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
+			show &= ~0x00800000;
+	}
+
+	if (show) {
+		nv_error(priv, "SUBFIFO%d:", unit);
+		nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+		printk("\n");
+		nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
+			       "data 0x%08x\n",
+			 unit, chid, subc, mthd, data);
+	}
+
+	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_intr(struct nouveau_subdev *subdev)
+{
+	struct nve0_fifo_priv *priv = (void *)subdev;
+	u32 mask = nv_rd32(priv, 0x002140);
+	u32 stat = nv_rd32(priv, 0x002100) & mask;
+
+	if (stat & 0x00000100) {
+		nv_warn(priv, "unknown status 0x00000100\n");
+		nv_wr32(priv, 0x002100, 0x00000100);
+		stat &= ~0x00000100;
+	}
+
+	if (stat & 0x10000000) {
+		u32 units = nv_rd32(priv, 0x00259c);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nve0_fifo_isr_vm_fault(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x00259c, units);
+		stat &= ~0x10000000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 units = nv_rd32(priv, 0x0025a0);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nve0_fifo_isr_subfifo_intr(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x0025a0, units);
+		stat &= ~0x20000000;
+	}
+
+	if (stat & 0x40000000) {
+		nv_warn(priv, "unknown status 0x40000000\n");
+		nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+		stat &= ~0x40000000;
+	}
+
+	if (stat) {
+		nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+		nv_wr32(priv, 0x002100, stat);
+		nv_wr32(priv, 0x002140, 0);
+	}
+}
+
+static int
+nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nve0_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+				&priv->user.bar);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nve0_fifo_intr;
+	nv_engine(priv)->cclass = &nve0_fifo_cclass;
+	nv_engine(priv)->sclass = nve0_fifo_sclass;
+	return 0;
+}
+
+static void
+nve0_fifo_dtor(struct nouveau_object *object)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int i;
+
+	nouveau_gpuobj_unmap(&priv->user.bar);
+	nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+	for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+	}
+
+	nouveau_fifo_destroy(&priv->base);
+}
+
+static int
+nve0_fifo_init(struct nouveau_object *object)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* enable all available PSUBFIFOs */
+	nv_wr32(priv, 0x000204, 0xffffffff);
+	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+	nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+
+	/* PSUBFIFO[n] */
+	for (i = 0; i < priv->spoon_nr; i++) {
+		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	}
+
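+	/* location of the per-channel USER area (0x200 bytes each) */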
+	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+	nv_wr32(priv, 0x002a00, 0xffffffff);
+	nv_wr32(priv, 0x002100, 0xffffffff);
+	nv_wr32(priv, 0x002140, 0xbfffffff);
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_fifo_ctor,
+		.dtor = nve0_fifo_dtor,
+		.init = nve0_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
index b0795ececbda..e1947013d3bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
@@ -2,7 +2,7 @@
 #define __NOUVEAU_GRCTX_H__
 
 struct nouveau_grctx {
-	struct drm_device *dev;
+	struct nouveau_device *device;
 
 	enum {
 		NOUVEAU_GRCTX_PROG,
@@ -10,18 +10,18 @@ struct nouveau_grctx {
 	} mode;
 	void *data;
 
-	uint32_t ctxprog_max;
-	uint32_t ctxprog_len;
-	uint32_t ctxprog_reg;
-	int      ctxprog_label[32];
-	uint32_t ctxvals_pos;
-	uint32_t ctxvals_base;
+	u32 ctxprog_max;
+	u32 ctxprog_len;
+	u32 ctxprog_reg;
+	int ctxprog_label[32];
+	u32 ctxvals_pos;
+	u32 ctxvals_base;
 };
 
 static inline void
-cp_out(struct nouveau_grctx *ctx, uint32_t inst)
+cp_out(struct nouveau_grctx *ctx, u32 inst)
 {
-	uint32_t *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->data;
 
 	if (ctx->mode != NOUVEAU_GRCTX_PROG)
 		return;
@@ -31,13 +31,13 @@ cp_out(struct nouveau_grctx *ctx, uint32_t inst)
 }
 
 static inline void
-cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
+cp_lsr(struct nouveau_grctx *ctx, u32 val)
 {
 	cp_out(ctx, CP_LOAD_SR | val);
 }
 
 static inline void
-cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
+cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
 {
 	ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
 
@@ -55,7 +55,7 @@ cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
 static inline void
 cp_name(struct nouveau_grctx *ctx, int name)
 {
-	uint32_t *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->data;
 	int i;
 
 	if (ctx->mode != NOUVEAU_GRCTX_PROG)
@@ -115,7 +115,7 @@ cp_pos(struct nouveau_grctx *ctx, int offset)
 }
 
 static inline void
-gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
+gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
 {
 	if (ctx->mode != NOUVEAU_GRCTX_VALS)
 		return;
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index cf115ad4dad1..e45035efb8ca 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -22,6 +22,8 @@
  * Authors: Ben Skeggs
  */
 
+#include <core/gpuobj.h>
+
 /* NVIDIA context programs handle a number of other conditions which are
  * not implemented in our versions.  It's not clear why NVIDIA context
  * programs have this code, nor whether it's strictly necessary for
@@ -109,20 +111,18 @@
 #define CP_LOAD_MAGIC_NV44TCL    0x00800029 /* per-vs state (0x4497) */
 #define CP_LOAD_MAGIC_NV40TCL    0x00800041 /* per-vs state (0x4097) */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nv40.h"
+#include "ctx.h"
 
 /* TODO:
  *  - get vs count from 0x1540
  */
 
 static int
-nv40_graph_vs_count(struct drm_device *dev)
+nv40_graph_vs_count(struct nouveau_device *device)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
@@ -160,7 +160,7 @@ enum cp_label {
 static void
 nv40_graph_construct_general(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 
 	cp_ctx(ctx, 0x4000a4, 1);
@@ -187,7 +187,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 	cp_ctx(ctx, 0x400724, 1);
 	gr_def(ctx, 0x400724, 0x02008821);
 	cp_ctx(ctx, 0x400770, 3);
-	if (dev_priv->chipset == 0x40) {
+	if (device->chipset == 0x40) {
 		cp_ctx(ctx, 0x400814, 4);
 		cp_ctx(ctx, 0x400828, 5);
 		cp_ctx(ctx, 0x400840, 5);
@@ -208,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x4009dc, 0x80000000);
 	} else {
 		cp_ctx(ctx, 0x400840, 20);
-		if (nv44_graph_class(ctx->dev)) {
+		if (nv44_graph_class(ctx->device)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
 		}
@@ -217,21 +217,21 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x400888, 0x00000040);
 		cp_ctx(ctx, 0x400894, 11);
 		gr_def(ctx, 0x400894, 0x00000040);
-		if (!nv44_graph_class(ctx->dev)) {
+		if (!nv44_graph_class(ctx->device)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
 		}
 		cp_ctx(ctx, 0x4008e0, 2);
 		cp_ctx(ctx, 0x4008f8, 2);
-		if (dev_priv->chipset == 0x4c ||
-		    (dev_priv->chipset & 0xf0) == 0x60)
+		if (device->chipset == 0x4c ||
+		    (device->chipset & 0xf0) == 0x60)
 			cp_ctx(ctx, 0x4009f8, 1);
 	}
 	cp_ctx(ctx, 0x400a00, 73);
 	gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
 	cp_ctx(ctx, 0x401000, 4);
 	cp_ctx(ctx, 0x405004, 1);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
@@ -240,7 +240,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 		break;
 	default:
 		cp_ctx(ctx, 0x403440, 1);
-		switch (dev_priv->chipset) {
+		switch (device->chipset) {
 		case 0x40:
 			gr_def(ctx, 0x403440, 0x00000010);
 			break;
@@ -266,19 +266,19 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 
-	if (dev_priv->chipset == 0x40) {
+	if (device->chipset == 0x40) {
 		cp_ctx(ctx, 0x401880, 51);
 		gr_def(ctx, 0x401940, 0x00000100);
 	} else
-	if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
-	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+	if (device->chipset == 0x46 || device->chipset == 0x47 ||
+	    device->chipset == 0x49 || device->chipset == 0x4b) {
 		cp_ctx(ctx, 0x401880, 32);
 		for (i = 0; i < 16; i++)
 			gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
-		if (dev_priv->chipset == 0x46)
+		if (device->chipset == 0x46)
 			cp_ctx(ctx, 0x401900, 16);
 		cp_ctx(ctx, 0x401940, 3);
 	}
@@ -289,7 +289,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x401978, 0xffff0000);
 	gr_def(ctx, 0x40197c, 0x00000001);
 	gr_def(ctx, 0x401990, 0x46400000);
-	if (dev_priv->chipset == 0x40) {
+	if (device->chipset == 0x40) {
 		cp_ctx(ctx, 0x4019a0, 2);
 		cp_ctx(ctx, 0x4019ac, 5);
 	} else {
@@ -297,7 +297,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 		cp_ctx(ctx, 0x4019b4, 3);
 	}
 	gr_def(ctx, 0x4019bc, 0xffff0000);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x46:
 	case 0x47:
 	case 0x49:
@@ -316,7 +316,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 	for (i = 0; i < 16; i++)
 		gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
 	gr_def(ctx, 0x401a8c, 0x4b7fffff);
-	if (dev_priv->chipset == 0x40) {
+	if (device->chipset == 0x40) {
 		cp_ctx(ctx, 0x401ab8, 3);
 	} else {
 		cp_ctx(ctx, 0x401ab8, 1);
@@ -327,10 +327,10 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x401ad4, 0x70605040);
 	gr_def(ctx, 0x401ad8, 0xb8a89888);
 	gr_def(ctx, 0x401adc, 0xf8e8d8c8);
-	cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
+	cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1);
 	gr_def(ctx, 0x401b10, 0x40100000);
-	cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
-	gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
+	cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5);
+	gr_def(ctx, 0x401b28, device->chipset == 0x40 ?
 			      0x00000004 : 0x00000000);
 	cp_ctx(ctx, 0x401b30, 25);
 	gr_def(ctx, 0x401b34, 0x0000ffff);
@@ -341,8 +341,8 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x401b84, 0xffffffff);
 	gr_def(ctx, 0x401b88, 0x00ff7000);
 	gr_def(ctx, 0x401b8c, 0x0000ffff);
-	if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
-	    dev_priv->chipset != 0x4e)
+	if (device->chipset != 0x44 && device->chipset != 0x4a &&
+	    device->chipset != 0x4e)
 		cp_ctx(ctx, 0x401b94, 1);
 	cp_ctx(ctx, 0x401b98, 8);
 	gr_def(ctx, 0x401b9c, 0x00ff0000);
@@ -371,12 +371,12 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 
 	cp_ctx(ctx, 0x402000, 1);
-	cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
-	switch (dev_priv->chipset) {
+	cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2);
+	switch (device->chipset) {
 	case 0x40:
 		gr_def(ctx, 0x402404, 0x00000001);
 		break;
@@ -393,9 +393,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 	default:
 		gr_def(ctx, 0x402404, 0x00000021);
 	}
-	if (dev_priv->chipset != 0x40)
+	if (device->chipset != 0x40)
 		gr_def(ctx, 0x402408, 0x030c30c3);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x44:
 	case 0x46:
 	case 0x4a:
@@ -408,10 +408,10 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 	default:
 		break;
 	}
-	cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
+	cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9);
 	gr_def(ctx, 0x402488, 0x3e020200);
 	gr_def(ctx, 0x40248c, 0x00ffffff);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x40:
 		gr_def(ctx, 0x402490, 0x60103f00);
 		break;
@@ -428,16 +428,16 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x402490, 0x0c103f00);
 		break;
 	}
-	gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
+	gr_def(ctx, 0x40249c, device->chipset <= 0x43 ?
 			      0x00020000 : 0x00040000);
 	cp_ctx(ctx, 0x402500, 31);
 	gr_def(ctx, 0x402530, 0x00008100);
-	if (dev_priv->chipset == 0x40)
+	if (device->chipset == 0x40)
 		cp_ctx(ctx, 0x40257c, 6);
 	cp_ctx(ctx, 0x402594, 16);
 	cp_ctx(ctx, 0x402800, 17);
 	gr_def(ctx, 0x402800, 0x00000001);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
@@ -445,7 +445,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x402864, 0x00001001);
 		cp_ctx(ctx, 0x402870, 3);
 		gr_def(ctx, 0x402878, 0x00000003);
-		if (dev_priv->chipset != 0x47) { /* belong at end!! */
+		if (device->chipset != 0x47) { /* belong at end!! */
 			cp_ctx(ctx, 0x402900, 1);
 			cp_ctx(ctx, 0x402940, 1);
 			cp_ctx(ctx, 0x402980, 1);
@@ -470,9 +470,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 	}
 
 	cp_ctx(ctx, 0x402c00, 4);
-	gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
+	gr_def(ctx, 0x402c00, device->chipset == 0x40 ?
 			      0x80800001 : 0x00888001);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
@@ -485,30 +485,30 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 		break;
 	default:
 		cp_ctx(ctx, 0x402c10, 4);
-		if (dev_priv->chipset == 0x40)
+		if (device->chipset == 0x40)
 			cp_ctx(ctx, 0x402c20, 36);
 		else
-		if (dev_priv->chipset <= 0x42)
+		if (device->chipset <= 0x42)
 			cp_ctx(ctx, 0x402c20, 24);
 		else
-		if (dev_priv->chipset <= 0x4a)
+		if (device->chipset <= 0x4a)
 			cp_ctx(ctx, 0x402c20, 16);
 		else
 			cp_ctx(ctx, 0x402c20, 8);
-		cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
+		cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13);
 		gr_def(ctx, 0x402cd4, 0x00000005);
-		if (dev_priv->chipset != 0x40)
+		if (device->chipset != 0x40)
 			gr_def(ctx, 0x402ce0, 0x0000ffff);
 		break;
 	}
 
-	cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
-	cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
-	cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
-	for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
+	cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
+	cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
+	cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
+	for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
 		gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
 
-	if (dev_priv->chipset != 0x40) {
+	if (device->chipset != 0x40) {
 		cp_ctx(ctx, 0x403600, 1);
 		gr_def(ctx, 0x403600, 0x00000001);
 	}
@@ -516,7 +516,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 
 	cp_ctx(ctx, 0x403c18, 1);
 	gr_def(ctx, 0x403c18, 0x00000001);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x46:
 	case 0x47:
 	case 0x49:
@@ -527,7 +527,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x405c24, 0x000e3000);
 		break;
 	}
-	if (dev_priv->chipset != 0x4e)
+	if (device->chipset != 0x4e)
 		cp_ctx(ctx, 0x405800, 11);
 	cp_ctx(ctx, 0x407000, 1);
 }
@@ -535,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 {
-	int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
+	int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
 
 	cp_out (ctx, 0x300000);
 	cp_lsr (ctx, len - 4);
@@ -550,32 +550,31 @@ nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_shader(struct nouveau_grctx *ctx)
 {
-	struct drm_device *dev = ctx->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	struct nouveau_gpuobj *obj = ctx->data;
 	int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
 	int offset, i;
 
-	vs_nr    = nv40_graph_vs_count(ctx->dev);
+	vs_nr    = nv40_graph_vs_count(ctx->device);
 	vs_nr_b0 = 363;
-	vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
-	if (dev_priv->chipset == 0x40) {
+	vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
+	if (device->chipset == 0x40) {
 		b0_offset = 0x2200/4; /* 33a0 */
 		b1_offset = 0x55a0/4; /* 1500 */
 		vs_len = 0x6aa0/4;
 	} else
-	if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
+	if (device->chipset == 0x41 || device->chipset == 0x42) {
 		b0_offset = 0x2200/4; /* 2200 */
 		b1_offset = 0x4400/4; /* 0b00 */
 		vs_len = 0x4f00/4;
 	} else {
 		b0_offset = 0x1d40/4; /* 2200 */
 		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
-		vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
+		vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
 	}
 
 	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
-	cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
+	cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
 
 	offset = ctx->ctxvals_pos;
 	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
@@ -661,21 +660,21 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
 }
 
 void
-nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
 {
 	nv40_grctx_generate(&(struct nouveau_grctx) {
-			     .dev = dev,
+			     .device = device,
 			     .mode = NOUVEAU_GRCTX_VALS,
 			     .data = mem,
 			   });
 }
 
 void
-nv40_grctx_init(struct drm_device *dev, u32 *size)
+nv40_grctx_init(struct nouveau_device *device, u32 *size)
 {
 	u32 ctxprog[256], i;
 	struct nouveau_grctx ctx = {
-		.dev = dev,
+		.device = device,
 		.mode = NOUVEAU_GRCTX_PROG,
 		.data = ctxprog,
 		.ctxprog_max = ARRAY_SIZE(ctxprog)
@@ -683,8 +682,8 @@ nv40_grctx_init(struct drm_device *dev, u32 *size)
 
 	nv40_grctx_generate(&ctx);
 
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+	nv_wr32(device, 0x400324, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+		nv_wr32(device, 0x400328, ctxprog[i]);
 	*size = ctx.ctxvals_pos * 4;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
index 3bb96a029d66..552fdbd45ebe 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -20,6 +20,8 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <core/gpuobj.h>
+
 #define CP_FLAG_CLEAR                 0
 #define CP_FLAG_SET                   1
 #define CP_FLAG_SWAP_DIRECTION        ((0 * 32) + 0)
@@ -105,9 +107,8 @@
 #define CP_SEEK_1      0x00c000ff
 #define CP_SEEK_2      0x00c800ff
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nv50.h"
+#include "ctx.h"
 
 #define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
 #define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
@@ -175,32 +176,6 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
 static int
 nv50_grctx_generate(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-
-	switch (dev_priv->chipset) {
-	case 0x50:
-	case 0x84:
-	case 0x86:
-	case 0x92:
-	case 0x94:
-	case 0x96:
-	case 0x98:
-	case 0xa0:
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-	case 0xaa:
-	case 0xac:
-	case 0xaf:
-		break;
-	default:
-		NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
-				   "your NV%x card.\n", dev_priv->chipset);
-		NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
-				   "the devs.\n");
-		return -ENOSYS;
-	}
-
 	cp_set (ctx, STATE, RUNNING);
 	cp_set (ctx, XFER_SWITCH, ENABLE);
 	/* decide whether we're loading/unloading the context */
@@ -278,30 +253,36 @@ nv50_grctx_generate(struct nouveau_grctx *ctx)
 }
 
 void
-nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
 {
 	nv50_grctx_generate(&(struct nouveau_grctx) {
-			     .dev = dev,
+			     .device = device,
 			     .mode = NOUVEAU_GRCTX_VALS,
 			     .data = mem,
 			   });
 }
 
 int
-nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+nv50_grctx_init(struct nouveau_device *device, u32 *size)
 {
+	u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i;
 	struct nouveau_grctx ctx = {
-		.dev = dev,
+		.device = device,
 		.mode = NOUVEAU_GRCTX_PROG,
-		.data = data,
-		.ctxprog_max = max
+		.data = ctxprog,
+		.ctxprog_max = 512,
 	};
-	int ret;
 
-	ret = nv50_grctx_generate(&ctx);
-	*cnt = ctx.ctxvals_pos * 4;
-	*len = ctx.ctxprog_len;
-	return ret;
+	if (!ctxprog)
+		return -ENOMEM;
+	nv50_grctx_generate(&ctx);
+
+	nv_wr32(device, 0x400324, 0);
+	for (i = 0; i < ctx.ctxprog_len; i++)
+		nv_wr32(device, 0x400328, ctxprog[i]);
+	*size = ctx.ctxvals_pos * 4;
+	kfree(ctxprog);
+	return 0;
 }
 
 /*
@@ -315,36 +296,36 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
 static void
 nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i, j;
 	int offset, base;
-	uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+	u32 units = nv_rd32 (ctx->device, 0x1540);
 
 	/* 0800: DISPATCH */
 	cp_ctx(ctx, 0x400808, 7);
 	gr_def(ctx, 0x400814, 0x00000030);
 	cp_ctx(ctx, 0x400834, 0x32);
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		gr_def(ctx, 0x400834, 0xff400040);
 		gr_def(ctx, 0x400838, 0xfff00080);
 		gr_def(ctx, 0x40083c, 0xfff70090);
 		gr_def(ctx, 0x400840, 0xffe806a8);
 	}
 	gr_def(ctx, 0x400844, 0x00000002);
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		gr_def(ctx, 0x400894, 0x00001000);
 	gr_def(ctx, 0x4008e8, 0x00000003);
 	gr_def(ctx, 0x4008ec, 0x00001000);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		cp_ctx(ctx, 0x400908, 0xb);
-	else if (dev_priv->chipset < 0xa0)
+	else if (device->chipset < 0xa0)
 		cp_ctx(ctx, 0x400908, 0xc);
 	else
 		cp_ctx(ctx, 0x400908, 0xe);
 
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		cp_ctx(ctx, 0x400b00, 0x1);
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		cp_ctx(ctx, 0x400b10, 0x1);
 		gr_def(ctx, 0x400b10, 0x0001629d);
 		cp_ctx(ctx, 0x400b20, 0x1);
@@ -358,10 +339,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x400c08, 0x0000fe0c);
 
 	/* 1000 */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		cp_ctx(ctx, 0x401008, 0x4);
 		gr_def(ctx, 0x401014, 0x00001000);
-	} else if (!IS_NVA3F(dev_priv->chipset)) {
+	} else if (!IS_NVA3F(device->chipset)) {
 		cp_ctx(ctx, 0x401008, 0x5);
 		gr_def(ctx, 0x401018, 0x00001000);
 	} else {
@@ -372,7 +353,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	/* 1400 */
 	cp_ctx(ctx, 0x401400, 0x8);
 	cp_ctx(ctx, 0x401424, 0x3);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		gr_def(ctx, 0x40142c, 0x0001fd87);
 	else
 		gr_def(ctx, 0x40142c, 0x00000187);
@@ -382,10 +363,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	/* 1800: STREAMOUT */
 	cp_ctx(ctx, 0x401814, 0x1);
 	gr_def(ctx, 0x401814, 0x000000ff);
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		cp_ctx(ctx, 0x40181c, 0xe);
 		gr_def(ctx, 0x401850, 0x00000004);
-	} else if (dev_priv->chipset < 0xa0) {
+	} else if (device->chipset < 0xa0) {
 		cp_ctx(ctx, 0x40181c, 0xf);
 		gr_def(ctx, 0x401854, 0x00000004);
 	} else {
@@ -395,7 +376,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 	/* 1C00 */
 	cp_ctx(ctx, 0x401c00, 0x1);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 		gr_def(ctx, 0x401c00, 0x0001005f);
 		break;
@@ -424,7 +405,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 	/* 2400 */
 	cp_ctx(ctx, 0x402400, 0x1);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		cp_ctx(ctx, 0x402408, 0x1);
 	else
 		cp_ctx(ctx, 0x402408, 0x2);
@@ -432,21 +413,21 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 	/* 2800: CSCHED */
 	cp_ctx(ctx, 0x402800, 0x1);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		gr_def(ctx, 0x402800, 0x00000006);
 
 	/* 2C00: ZCULL */
 	cp_ctx(ctx, 0x402c08, 0x6);
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		gr_def(ctx, 0x402c14, 0x01000000);
 	gr_def(ctx, 0x402c18, 0x000000ff);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		cp_ctx(ctx, 0x402ca0, 0x1);
 	else
 		cp_ctx(ctx, 0x402ca0, 0x2);
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		gr_def(ctx, 0x402ca0, 0x00000400);
-	else if (!IS_NVA3F(dev_priv->chipset))
+	else if (!IS_NVA3F(device->chipset))
 		gr_def(ctx, 0x402ca0, 0x00000800);
 	else
 		gr_def(ctx, 0x402ca0, 0x00000400);
@@ -457,14 +438,14 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x403004, 0x00000001);
 
 	/* 3400 */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		cp_ctx(ctx, 0x403404, 0x1);
 		gr_def(ctx, 0x403404, 0x00000001);
 	}
 
 	/* 5000: CCACHE */
 	cp_ctx(ctx, 0x405000, 0x1);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 		gr_def(ctx, 0x405000, 0x00300080);
 		break;
@@ -493,22 +474,22 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	cp_ctx(ctx, 0x40502c, 0x1);
 
 	/* 6000? */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		cp_ctx(ctx, 0x4063e0, 0x1);
 
 	/* 6800: M2MF */
-	if (dev_priv->chipset < 0x90) {
+	if (device->chipset < 0x90) {
 		cp_ctx(ctx, 0x406814, 0x2b);
 		gr_def(ctx, 0x406818, 0x00000f80);
 		gr_def(ctx, 0x406860, 0x007f0080);
 		gr_def(ctx, 0x40689c, 0x007f0080);
 	} else {
 		cp_ctx(ctx, 0x406814, 0x4);
-		if (dev_priv->chipset == 0x98)
+		if (device->chipset == 0x98)
 			gr_def(ctx, 0x406818, 0x00000f80);
 		else
 			gr_def(ctx, 0x406818, 0x00001f80);
-		if (IS_NVA3F(dev_priv->chipset))
+		if (IS_NVA3F(device->chipset))
 			gr_def(ctx, 0x40681c, 0x00000030);
 		cp_ctx(ctx, 0x406830, 0x3);
 	}
@@ -517,43 +498,43 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	for (i = 0; i < 8; i++) {
 		if (units & (1<<(i+16))) {
 			cp_ctx(ctx, 0x407000 + (i<<8), 3);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
-			else if (dev_priv->chipset != 0xa5)
+			else if (device->chipset != 0xa5)
 				gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
 			else
 				gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
 			gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
 
-			if (dev_priv->chipset == 0x50) {
+			if (device->chipset == 0x50) {
 				cp_ctx(ctx, 0x407010 + (i<<8), 1);
-			} else if (dev_priv->chipset < 0xa0) {
+			} else if (device->chipset < 0xa0) {
 				cp_ctx(ctx, 0x407010 + (i<<8), 2);
 				gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
 				gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
 			} else {
 				cp_ctx(ctx, 0x407010 + (i<<8), 3);
 				gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
-				if (dev_priv->chipset != 0xa5)
+				if (device->chipset != 0xa5)
 					gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
 				else
 					gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
 			}
 
 			cp_ctx(ctx, 0x407080 + (i<<8), 4);
-			if (dev_priv->chipset != 0xa5)
+			if (device->chipset != 0xa5)
 				gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
 			else
 				gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
 			else
 				gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
 			gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				cp_ctx(ctx, 0x407094 + (i<<8), 1);
-			else if (!IS_NVA3F(dev_priv->chipset))
+			else if (!IS_NVA3F(device->chipset))
 				cp_ctx(ctx, 0x407094 + (i<<8), 3);
 			else {
 				cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -563,30 +544,30 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	}
 
 	cp_ctx(ctx, 0x407c00, 0x3);
-	if (dev_priv->chipset < 0x90)
+	if (device->chipset < 0x90)
 		gr_def(ctx, 0x407c00, 0x00010040);
-	else if (dev_priv->chipset < 0xa0)
+	else if (device->chipset < 0xa0)
 		gr_def(ctx, 0x407c00, 0x00390040);
 	else
 		gr_def(ctx, 0x407c00, 0x003d0040);
 	gr_def(ctx, 0x407c08, 0x00000022);
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		cp_ctx(ctx, 0x407c10, 0x3);
 		cp_ctx(ctx, 0x407c20, 0x1);
 		cp_ctx(ctx, 0x407c2c, 0x1);
 	}
 
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		cp_ctx(ctx, 0x407d00, 0x9);
 	} else {
 		cp_ctx(ctx, 0x407d00, 0x15);
 	}
-	if (dev_priv->chipset == 0x98)
+	if (device->chipset == 0x98)
 		gr_def(ctx, 0x407d08, 0x00380040);
 	else {
-		if (dev_priv->chipset < 0x90)
+		if (device->chipset < 0x90)
 			gr_def(ctx, 0x407d08, 0x00010040);
-		else if (dev_priv->chipset < 0xa0)
+		else if (device->chipset < 0xa0)
 			gr_def(ctx, 0x407d08, 0x00390040);
 		else
 			gr_def(ctx, 0x407d08, 0x003d0040);
@@ -596,11 +577,11 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	/* 8000+: per-TP state */
 	for (i = 0; i < 10; i++) {
 		if (units & (1<<i)) {
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				base = 0x408000 + (i<<12);
 			else
 				base = 0x408000 + (i<<11);
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				offset = base + 0xc00;
 			else
 				offset = base + 0x80;
@@ -609,9 +590,9 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			cp_ctx(ctx, offset + 0x08, 1);
 
 			/* per-MP state */
-			for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
+			for (j = 0; j < (device->chipset < 0xa0 ? 2 : 4); j++) {
 				if (!(units & (1 << (j+24)))) continue;
-				if (dev_priv->chipset < 0xa0)
+				if (device->chipset < 0xa0)
 					offset = base + 0x200 + (j<<7);
 				else
 					offset = base + 0x100 + (j<<7);
@@ -620,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 				gr_def(ctx, offset + 0x04, 0x00160000);
 				gr_def(ctx, offset + 0x08, 0x01800000);
 				gr_def(ctx, offset + 0x18, 0x0003ffff);
-				switch (dev_priv->chipset) {
+				switch (device->chipset) {
 				case 0x50:
 					gr_def(ctx, offset + 0x1c, 0x00080000);
 					break;
@@ -651,53 +632,53 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 					break;
 				}
 				gr_def(ctx, offset + 0x40, 0x00010401);
-				if (dev_priv->chipset == 0x50)
+				if (device->chipset == 0x50)
 					gr_def(ctx, offset + 0x48, 0x00000040);
 				else
 					gr_def(ctx, offset + 0x48, 0x00000078);
 				gr_def(ctx, offset + 0x50, 0x000000bf);
 				gr_def(ctx, offset + 0x58, 0x00001210);
-				if (dev_priv->chipset == 0x50)
+				if (device->chipset == 0x50)
 					gr_def(ctx, offset + 0x5c, 0x00000080);
 				else
 					gr_def(ctx, offset + 0x5c, 0x08000080);
-				if (dev_priv->chipset >= 0xa0)
+				if (device->chipset >= 0xa0)
 					gr_def(ctx, offset + 0x68, 0x0000003e);
 			}
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				cp_ctx(ctx, base + 0x300, 0x4);
 			else
 				cp_ctx(ctx, base + 0x300, 0x5);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, base + 0x304, 0x00007070);
-			else if (dev_priv->chipset < 0xa0)
+			else if (device->chipset < 0xa0)
 				gr_def(ctx, base + 0x304, 0x00027070);
-			else if (!IS_NVA3F(dev_priv->chipset))
+			else if (!IS_NVA3F(device->chipset))
 				gr_def(ctx, base + 0x304, 0x01127070);
 			else
 				gr_def(ctx, base + 0x304, 0x05127070);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				cp_ctx(ctx, base + 0x318, 1);
 			else
 				cp_ctx(ctx, base + 0x320, 1);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, base + 0x318, 0x0003ffff);
-			else if (dev_priv->chipset < 0xa0)
+			else if (device->chipset < 0xa0)
 				gr_def(ctx, base + 0x318, 0x03ffffff);
 			else
 				gr_def(ctx, base + 0x320, 0x07ffffff);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				cp_ctx(ctx, base + 0x324, 5);
 			else
 				cp_ctx(ctx, base + 0x328, 4);
 
-			if (dev_priv->chipset < 0xa0) {
+			if (device->chipset < 0xa0) {
 				cp_ctx(ctx, base + 0x340, 9);
 				offset = base + 0x340;
-			} else if (!IS_NVA3F(dev_priv->chipset)) {
+			} else if (!IS_NVA3F(device->chipset)) {
 				cp_ctx(ctx, base + 0x33c, 0xb);
 				offset = base + 0x344;
 			} else {
@@ -706,12 +687,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			}
 			gr_def(ctx, offset + 0x0, 0x00120407);
 			gr_def(ctx, offset + 0x4, 0x05091507);
-			if (dev_priv->chipset == 0x84)
+			if (device->chipset == 0x84)
 				gr_def(ctx, offset + 0x8, 0x05100202);
 			else
 				gr_def(ctx, offset + 0x8, 0x05010202);
 			gr_def(ctx, offset + 0xc, 0x00030201);
-			if (dev_priv->chipset == 0xa3)
+			if (device->chipset == 0xa3)
 				cp_ctx(ctx, base + 0x36c, 1);
 
 			cp_ctx(ctx, base + 0x400, 2);
@@ -720,7 +701,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
 			gr_def(ctx, base + 0x410, 0x00141210);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				offset = base + 0x800;
 			else
 				offset = base + 0x500;
@@ -728,55 +709,55 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			gr_def(ctx, offset + 0x0, 0x000001f0);
 			gr_def(ctx, offset + 0x4, 0x00000001);
 			gr_def(ctx, offset + 0x8, 0x00000003);
-			if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
+			if (device->chipset == 0x50 || IS_NVAAF(device->chipset))
 				gr_def(ctx, offset + 0xc, 0x00008000);
 			gr_def(ctx, offset + 0x14, 0x00039e00);
 			cp_ctx(ctx, offset + 0x1c, 2);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, offset + 0x1c, 0x00000040);
 			else
 				gr_def(ctx, offset + 0x1c, 0x00000100);
 			gr_def(ctx, offset + 0x20, 0x00003800);
 
-			if (dev_priv->chipset >= 0xa0) {
+			if (device->chipset >= 0xa0) {
 				cp_ctx(ctx, base + 0x54c, 2);
-				if (!IS_NVA3F(dev_priv->chipset))
+				if (!IS_NVA3F(device->chipset))
 					gr_def(ctx, base + 0x54c, 0x003fe006);
 				else
 					gr_def(ctx, base + 0x54c, 0x003fe007);
 				gr_def(ctx, base + 0x550, 0x003fe000);
 			}
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				offset = base + 0xa00;
 			else
 				offset = base + 0x680;
 			cp_ctx(ctx, offset, 1);
 			gr_def(ctx, offset, 0x00404040);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				offset = base + 0xe00;
 			else
 				offset = base + 0x700;
 			cp_ctx(ctx, offset, 2);
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				gr_def(ctx, offset, 0x0077f005);
-			else if (dev_priv->chipset == 0xa5)
+			else if (device->chipset == 0xa5)
 				gr_def(ctx, offset, 0x6cf7f007);
-			else if (dev_priv->chipset == 0xa8)
+			else if (device->chipset == 0xa8)
 				gr_def(ctx, offset, 0x6cfff007);
-			else if (dev_priv->chipset == 0xac)
+			else if (device->chipset == 0xac)
 				gr_def(ctx, offset, 0x0cfff007);
 			else
 				gr_def(ctx, offset, 0x0cf7f007);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, offset + 0x4, 0x00007fff);
-			else if (dev_priv->chipset < 0xa0)
+			else if (device->chipset < 0xa0)
 				gr_def(ctx, offset + 0x4, 0x003f7fff);
 			else
 				gr_def(ctx, offset + 0x4, 0x02bf7fff);
 			cp_ctx(ctx, offset + 0x2c, 1);
-			if (dev_priv->chipset == 0x50) {
+			if (device->chipset == 0x50) {
 				cp_ctx(ctx, offset + 0x50, 9);
 				gr_def(ctx, offset + 0x54, 0x000003ff);
 				gr_def(ctx, offset + 0x58, 0x00000003);
@@ -785,7 +766,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 				gr_def(ctx, offset + 0x64, 0x0000001f);
 				gr_def(ctx, offset + 0x68, 0x0000000f);
 				gr_def(ctx, offset + 0x6c, 0x0000000f);
-			} else if (dev_priv->chipset < 0xa0) {
+			} else if (device->chipset < 0xa0) {
 				cp_ctx(ctx, offset + 0x50, 1);
 				cp_ctx(ctx, offset + 0x70, 1);
 			} else {
@@ -797,7 +778,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 }
 
 static void
-dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+dd_emit(struct nouveau_grctx *ctx, int num, u32 val) {
 	int i;
 	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
 		for (i = 0; i < num; i++)
@@ -808,7 +789,7 @@ dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
 static void
 nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int base, num;
 	base = ctx->ctxvals_pos;
 
@@ -822,7 +803,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);	/* 00000001 SRC_LINEAR #1 */
 	dd_emit(ctx, 1, 0);	/* 000000ff SRC_ADDRESS_HIGH */
 	dd_emit(ctx, 1, 0);	/* 00000001 SRC_SRGB */
-	if (dev_priv->chipset >= 0x94)
+	if (device->chipset >= 0x94)
 		dd_emit(ctx, 1, 0);	/* 00000003 eng2d UNK0258 */
 	dd_emit(ctx, 1, 1);	/* 00000fff SRC_DEPTH */
 	dd_emit(ctx, 1, 0x100);	/* 0000ffff SRC_HEIGHT */
@@ -851,7 +832,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);		/* 0000007f BLOCKDIM_Z */
 	dd_emit(ctx, 1, 4);		/* 000000ff CP_REG_ALLOC_TEMP */
 	dd_emit(ctx, 1, 1);		/* 00000001 BLOCKDIM_DIRTY */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0);	/* 00000003 UNK03E8 */
 	dd_emit(ctx, 1, 1);		/* 0000007f BLOCK_ALLOC_HALFWARPS */
 	dd_emit(ctx, 1, 1);		/* 00000007 LOCAL_WARPS_NO_CLAMP */
@@ -863,7 +844,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);		/* 000007ff BLOCK_ALLOC_THREADS */
 
 	/* compat 2d state */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 4, 0);		/* 0000ffff clip X, Y, W, H */
 
 		dd_emit(ctx, 1, 1);		/* ffffffff chroma COLOR_FORMAT */
@@ -923,7 +904,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_PITCH_IN */
 
 	/* more compat 2d state */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 1, 1);		/* ffffffff line COLOR_FORMAT */
 		dd_emit(ctx, 1, 0);		/* ffffffff line OPERATION */
 
@@ -957,18 +938,18 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0);		/* 000000ff UNK12B0_2 */
 	dd_emit(ctx, 1, 0);		/* 0000000f FP_TEXTURES_LOG2 */
 	dd_emit(ctx, 1, 0);		/* 0000000f FP_SAMPLERS_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		dd_emit(ctx, 1, 0);	/* ffffffff */
 		dd_emit(ctx, 1, 0);	/* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
 	} else {
 		dd_emit(ctx, 1, 0);	/* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
 	}
 	dd_emit(ctx, 1, 0xc);		/* 000000ff SEMANTIC_COLOR.BFC0_ID */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 0);	/* 00000001 SEMANTIC_COLOR.CLMP_EN */
 	dd_emit(ctx, 1, 8);		/* 000000ff SEMANTIC_COLOR.COLR_NR */
 	dd_emit(ctx, 1, 0x14);		/* 000000ff SEMANTIC_COLOR.FFC0_ID */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 1, 0);	/* 000000ff SEMANTIC_LAYER */
 		dd_emit(ctx, 1, 0);	/* 00000001 */
 	} else {
@@ -994,7 +975,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 8, 0);		/* ffffffff RT_ADDRESS_LOW */
 	dd_emit(ctx, 1, 0xcf);		/* 000000ff RT_FORMAT */
 	dd_emit(ctx, 7, 0);		/* 000000ff RT_FORMAT */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 3, 0);	/* 1, 1, 1 */
 	else
 		dd_emit(ctx, 2, 0);	/* 1, 1 */
@@ -1002,15 +983,15 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
 	dd_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
 	dd_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		dd_emit(ctx, 1, 3);	/* 00000003 */
 		dd_emit(ctx, 1, 0);	/* 00000001 UNK1418. Alone. */
 	}
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 3);	/* 00000003 UNK15AC */
 	dd_emit(ctx, 1, 1);		/* ffffffff RASTERIZE_ENABLE */
 	dd_emit(ctx, 1, 0);		/* 00000001 FP_CONTROL.EXPORTS_Z */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 0);	/* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
 	dd_emit(ctx, 1, 0x12);		/* 000000ff FP_INTERPOLANT_CTRL.COUNT */
 	dd_emit(ctx, 1, 0x10);		/* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
@@ -1022,16 +1003,16 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 4);		/* 000000ff FP_RESULT_COUNT */
 	dd_emit(ctx, 1, 2);		/* ffffffff REG_MODE */
 	dd_emit(ctx, 1, 4);		/* 000000ff FP_REG_ALLOC_TEMP */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* ffffffff */
 	dd_emit(ctx, 1, 0);		/* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
 	dd_emit(ctx, 1, 0);		/* ffffffff STRMOUT_ENABLE */
 	dd_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
 	dd_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
 	dd_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE*/
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 8, 0);	/* 00000001 */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.COMP */
 		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.SIZE */
 		dd_emit(ctx, 1, 2);	/* 00000007 VTX_ATTR_DEFINE.TYPE */
@@ -1042,20 +1023,20 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
 	dd_emit(ctx, 1, 0);		/* 0000000f VP_TEXTURES_LOG2 */
 	dd_emit(ctx, 1, 0);		/* 0000000f VP_SAMPLERS_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0);	/* 00000001 */
 	dd_emit(ctx, 1, 2);		/* 00000003 POLYGON_MODE_BACK */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
 	dd_emit(ctx, 1, 0);		/* 0000ffff CB_ADDR_INDEX */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* 00000003 */
 	dd_emit(ctx, 1, 0);		/* 00000001 CULL_FACE_ENABLE */
 	dd_emit(ctx, 1, 1);		/* 00000003 CULL_FACE */
 	dd_emit(ctx, 1, 0);		/* 00000001 FRONT_FACE */
 	dd_emit(ctx, 1, 2);		/* 00000003 POLYGON_MODE_FRONT */
 	dd_emit(ctx, 1, 0x1000);	/* 00007fff UNK141C */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		dd_emit(ctx, 1, 0xe00);		/* 7fff */
 		dd_emit(ctx, 1, 0x1000);	/* 7fff */
 		dd_emit(ctx, 1, 0x1e00);	/* 7fff */
@@ -1070,10 +1051,10 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0);		/* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
 	dd_emit(ctx, 1, 0);		/* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
 	dd_emit(ctx, 1, 0x200);		/* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0x200);
 	dd_emit(ctx, 1, 0);		/* 00000001 */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		dd_emit(ctx, 1, 1);	/* 00000001 */
 		dd_emit(ctx, 1, 0x70);	/* 000000ff */
 		dd_emit(ctx, 1, 0x80);	/* 000000ff */
@@ -1120,7 +1101,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 
 	num = ctx->ctxvals_pos - base;
 	ctx->ctxvals_pos = base;
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		cp_ctx(ctx, 0x404800, num);
 	else
 		cp_ctx(ctx, 0x405400, num);
@@ -1169,7 +1150,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
  */
 
 static void
-xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+xf_emit(struct nouveau_grctx *ctx, int num, u32 val) {
 	int i;
 	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
 		for (i = 0; i < num; i++)
@@ -1201,16 +1182,16 @@ static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
 static void
 nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 	int offset;
 	int size = 0;
-	uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+	u32 units = nv_rd32 (ctx->device, 0x1540);
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
 	ctx->ctxvals_base = offset;
 
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		/* Strand 0 */
 		ctx->ctxvals_pos = offset;
 		nv50_graph_construct_gene_dispatch(ctx);
@@ -1280,7 +1261,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 
 		/* Strand 2 */
 		ctx->ctxvals_pos = offset + 2;
-		if (dev_priv->chipset == 0xa0)
+		if (device->chipset == 0xa0)
 			nv50_graph_construct_gene_unk14xx(ctx);
 		nv50_graph_construct_gene_unk24xx(ctx);
 		if ((ctx->ctxvals_pos-offset)/8 > size)
@@ -1327,7 +1308,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 
 		/* Strand 7 */
 		ctx->ctxvals_pos = offset + 7;
-		if (dev_priv->chipset == 0xa0) {
+		if (device->chipset == 0xa0) {
 			if (units & (1 << 4))
 				nv50_graph_construct_xfer_tp(ctx);
 			if (units & (1 << 5))
@@ -1365,24 +1346,24 @@ static void
 nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
 {
 	/* start of strand 0 */
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* SEEK */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 5, 0);
-	else if (!IS_NVA3F(dev_priv->chipset))
+	else if (!IS_NVA3F(device->chipset))
 		xf_emit(ctx, 6, 0);
 	else
 		xf_emit(ctx, 4, 0);
 	/* SEEK */
 	/* the PGRAPH's internal FIFO */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 8*3, 0);
 	else
 		xf_emit(ctx, 0x100*3, 0);
 	/* and another bonus slot?!? */
 	xf_emit(ctx, 3, 0);
 	/* and YET ANOTHER bonus slot? */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 3, 0);
 	/* SEEK */
 	/* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
@@ -1394,7 +1375,7 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
 	/* SEEK */
 	xf_emit(ctx, 9, 0);
 	/* SEEK */
-	if (dev_priv->chipset < 0x90)
+	if (device->chipset < 0x90)
 		xf_emit(ctx, 4, 0);
 	/* SEEK */
 	xf_emit(ctx, 2, 0);
@@ -1407,9 +1388,9 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 6*2, 0);
 	xf_emit(ctx, 2, 0);
 	/* SEEK */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 0x1c, 0);
-	else if (dev_priv->chipset < 0xa0)
+	else if (device->chipset < 0xa0)
 		xf_emit(ctx, 0x1e, 0);
 	else
 		xf_emit(ctx, 0x22, 0);
@@ -1421,9 +1402,9 @@ static void
 nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
 {
 	/* Strand 0, right after dispatch */
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int smallm2mf = 0;
-	if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+	if (device->chipset < 0x92 || device->chipset == 0x98)
 		smallm2mf = 1;
 	/* SEEK */
 	xf_emit (ctx, 1, 0);		/* DMA_NOTIFY instance >> 4 */
@@ -1472,10 +1453,10 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 2, 0);		/* RO */
 	xf_emit(ctx, 0x800, 0);		/* ffffffff */
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0x92:
 	case 0xa0:
@@ -1540,7 +1521,7 @@ nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 	/* end of area 2 on pre-NVA0, area 1 on NVAx */
 	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
@@ -1550,14 +1531,14 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
 	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
 	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff);
 	else
 		xf_emit(ctx, 1, 0x7ff);	/* 000007ff */
 	xf_emit(ctx, 1, 0);		/* 111/113 */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
 	for (i = 0; i < 8; i++) {
-		switch (dev_priv->chipset) {
+		switch (device->chipset) {
 		case 0x50:
 		case 0x86:
 		case 0x98:
@@ -1600,7 +1581,7 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* end of area 2 on pre-NVA0, area 1 on NVAx */
 	xf_emit(ctx, 1, 0);		/* 00000001 VIEWPORT_CLIP_RECTS_EN */
 	xf_emit(ctx, 1, 0);		/* 00000003 VIEWPORT_CLIP_MODE */
@@ -1614,9 +1595,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
 	xf_emit(ctx, 1, 0);		/* 00000007 */
 	xf_emit(ctx, 1, 0x1fe21);	/* 0001ffff tesla UNK0FAC */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0x0fac6881);
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 1);
 		xf_emit(ctx, 3, 0);
 	}
@@ -1625,9 +1606,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 5, 0);		/* ffffffff */
 		xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
 		xf_emit(ctx, 1, 0);		/* 00000001 */
@@ -1643,14 +1624,14 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);			/* 00000001 GP_ENABLE */
 	xf_emit(ctx, 1, 0x10);			/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
 	xf_emit(ctx, 1, 0);			/* 000000ff VP_CLIP_DISTANCE_ENABLE */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);		/* 3ff */
 	xf_emit(ctx, 1, 0);			/* 000000ff tesla UNK1940 */
 	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK0D7C */
 	xf_emit(ctx, 1, 0x804);			/* 00000fff SEMANTIC_CLIP */
 	xf_emit(ctx, 1, 1);			/* 00000001 VIEWPORT_TRANSFORM_EN */
 	xf_emit(ctx, 1, 0x1a);			/* 0000001f POLYGON_MODE */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0x7f);		/* 000000ff tesla UNK0FFC */
 	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A30 */
 	xf_emit(ctx, 1, 1);			/* 00000001 SHADE_MODEL */
@@ -1669,7 +1650,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 4, 0);			/* ffffffff NOPERSPECTIVE_BITMAP */
 	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1900 */
 	xf_emit(ctx, 1, 0);			/* 0000000f */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff);		/* 000003ff tesla UNK0D68 */
 	else
 		xf_emit(ctx, 1, 0x7ff);		/* 000007ff tesla UNK0D68 */
@@ -1704,11 +1685,11 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);			/* 00000001 LINE_STIPPLE_ENABLE */
 	xf_emit(ctx, 1, 0);			/* 00000001 LINE_SMOOTH_ENABLE */
 	xf_emit(ctx, 1, 0);			/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0);		/* 00000001 */
 	xf_emit(ctx, 1, 0x1a);			/* 0000001f POLYGON_MODE */
 	xf_emit(ctx, 1, 0x10);			/* 000000ff VIEW_VOLUME_CLIP_CTRL */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 0);		/* ffffffff */
 		xf_emit(ctx, 1, 0);		/* 00000001 */
 		xf_emit(ctx, 1, 0);		/* 000003ff */
@@ -1736,7 +1717,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
 	/* SEEK */
 	xf_emit(ctx, 1, 0x3f);		/* 0000003f UNK1590 */
@@ -1774,7 +1755,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
 	xf_emit(ctx, 1, 0);		/* ffffffff CLEAR_DEPTH */
 	xf_emit(ctx, 1, 0);		/* 00000007 */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1108 */
 	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
 	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
@@ -1789,7 +1770,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 VIEWPORT_CLIP_RECTS_EN */
 	xf_emit(ctx, 1, 3);		/* 00000003 FP_CTRL_UNK196C */
 	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1968 */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);	/* 0fffffff tesla UNK1104 */
 	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK151C */
 }
@@ -1817,7 +1798,7 @@ nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 	/* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
 	/* SEEK */
@@ -1829,7 +1810,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 4, 0);	/* RO */
 		xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
 		xf_emit(ctx, 1, 0);	/* 1ff */
@@ -1860,7 +1841,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
 	/* SEEK */
 	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
@@ -1869,7 +1850,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
 	xf_emit(ctx, 1, 1);		/* 00000001 */
 	/* SEEK */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 2, 4);	/* 000000ff */
 	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
 	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
@@ -1893,20 +1874,20 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 0x10, 0);		/* 00ffffff POINT_COORD_REPLACE_MAP */
 	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
 	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);	/* 000003ff */
 }
 
 static void
 nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int acnt = 0x10, rep, i;
 	/* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		acnt = 0x20;
 	/* SEEK */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK13A4 */
 		xf_emit(ctx, 1, 1);	/* 00000fff tesla UNK1318 */
 	}
@@ -1923,9 +1904,9 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 0000ffff turing USER_PARAM_COUNT */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0xb, 0);	/* RO */
-	else if (dev_priv->chipset >= 0xa0)
+	else if (device->chipset >= 0xa0)
 		xf_emit(ctx, 0x9, 0);	/* RO */
 	else
 		xf_emit(ctx, 0x8, 0);	/* RO */
@@ -1944,11 +1925,11 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);		/* 000001ff UNK1A28 */
 	xf_emit(ctx, 1, 8);		/* 000001ff UNK0DF0 */
 	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff);	/* 3ff tesla UNK0D68 */
 	else
 		xf_emit(ctx, 1, 0x7ff);	/* 7ff tesla UNK0D68 */
-	if (dev_priv->chipset == 0xa8)
+	if (device->chipset == 0xa8)
 		xf_emit(ctx, 1, 0x1e00);	/* 7fff */
 	/* SEEK */
 	xf_emit(ctx, 0xc, 0);		/* RO or close */
@@ -1956,13 +1937,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
-	if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+	if (device->chipset > 0x50 && device->chipset < 0xa0)
 		xf_emit(ctx, 2, 0);	/* ffffffff */
 	else
 		xf_emit(ctx, 1, 0);	/* ffffffff */
 	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK0FD8 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 0x10, 0);	/* 0? */
 		xf_emit(ctx, 2, 0);	/* weird... */
 		xf_emit(ctx, 2, 0);	/* RO */
@@ -1975,7 +1956,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* ffffffff VB_ELEMENT_BASE */
 	xf_emit(ctx, 1, 0);		/* ffffffff UNK1438 */
 	xf_emit(ctx, acnt, 0);		/* 1 tesla UNK1000 */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1118? */
 	/* SEEK */
 	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_ARRAY_UNK90C */
@@ -2013,23 +1994,23 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, acnt, 0);		/* 000000ff VERTEX_LIMIT_HIGH */
 	xf_emit(ctx, 3, 0);		/* f/1f */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, acnt, 0);		/* f */
 		xf_emit(ctx, 3, 0);		/* f/1f */
 	}
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 2, 0);	/* RO */
 	else
 		xf_emit(ctx, 5, 0);	/* RO */
 	/* SEEK */
 	xf_emit(ctx, 1, 0);		/* ffff DMA_VTXBUF */
 	/* SEEK */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		xf_emit(ctx, 0x41, 0);	/* RO */
 		/* SEEK */
 		xf_emit(ctx, 0x11, 0);	/* RO */
-	} else if (!IS_NVA3F(dev_priv->chipset))
+	} else if (!IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x50, 0);	/* RO */
 	else
 		xf_emit(ctx, 0x58, 0);	/* RO */
@@ -2041,7 +2022,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, acnt*4, 0);	/* ffffffff VTX_ATTR */
 	xf_emit(ctx, 4, 0);		/* f/1f, 0, 0, 0 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x1d, 0);	/* RO */
 	else
 		xf_emit(ctx, 0x16, 0);	/* RO */
@@ -2049,21 +2030,21 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
 	/* SEEK */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 8, 0);	/* RO */
-	else if (IS_NVA3F(dev_priv->chipset))
+	else if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0xc, 0);	/* RO */
 	else
 		xf_emit(ctx, 7, 0);	/* RO */
 	/* SEEK */
 	xf_emit(ctx, 0xa, 0);		/* RO */
-	if (dev_priv->chipset == 0xa0)
+	if (device->chipset == 0xa0)
 		rep = 0xc;
 	else
 		rep = 4;
 	for (i = 0; i < rep; i++) {
 		/* SEEK */
-		if (IS_NVA3F(dev_priv->chipset))
+		if (IS_NVA3F(device->chipset))
 			xf_emit(ctx, 0x20, 0);	/* ffffffff */
 		xf_emit(ctx, 0x200, 0);	/* ffffffff */
 		xf_emit(ctx, 4, 0);	/* 7f/ff, 0, 0, 0 */
@@ -2077,7 +2058,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 7, 0);	/* weird... */
 	else
 		xf_emit(ctx, 5, 0);	/* weird... */
@@ -2086,13 +2067,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
 	/* SEEK */
 	xf_emit(ctx, 2, 0);		/* 0001ffff CLIP_X, CLIP_Y */
 	xf_emit(ctx, 2, 0);		/* 0000ffff CLIP_W, CLIP_H */
 	xf_emit(ctx, 1, 0);		/* 00000001 CLIP_ENABLE */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		/* this is useless on everything but the original NV50,
 		 * guess they forgot to nuke it. Or just didn't bother. */
 		xf_emit(ctx, 2, 0);	/* 0000ffff IFC_CLIP_X, Y */
@@ -2148,7 +2129,7 @@ nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
 	/* SEEK */
 	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
@@ -2173,7 +2154,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 2);		/* 00000003 REG_MODE */
 	/* SEEK */
 	xf_emit(ctx, 0x40, 0);		/* ffffffff USER_PARAM */
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0x92:
 		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
@@ -2247,7 +2228,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
 	xf_emit(ctx, 1, 0x3f800000);	/* ffffffff LINE_WIDTH */
 	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
@@ -2277,9 +2258,9 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
 	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
 	xf_emit(ctx, 1, 0);		/* 00000007 ALPHA_TEST_FUNC */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 3);	/* 00000003 UNK16B4 */
-	else if (dev_priv->chipset >= 0xa0)
+	else if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 1);	/* 00000001 UNK16B4 */
 	xf_emit(ctx, 1, 0);		/* 00000003 MULTISAMPLE_CTRL */
 	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK0F90 */
@@ -2293,11 +2274,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* ffffffff POINT_SIZE */
 	xf_emit(ctx, 1, 0);		/* 00000001 */
 	xf_emit(ctx, 1, 0);		/* 00000007 tesla UNK0FB4 */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 0);	/* 3ff */
 		xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK1110 */
 	}
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1928 */
 	xf_emit(ctx, 0x10, 0);		/* ffffffff DEPTH_RANGE_NEAR */
 	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR */
@@ -2316,11 +2297,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
 	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 0x1c, 0);	/* RO */
-	else if (IS_NVA3F(dev_priv->chipset))
+	else if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x9, 0);
 	xf_emit(ctx, 1, 0);		/* 00000001 UNK1534 */
 	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
@@ -2328,13 +2309,13 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
 	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
 	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
 		xf_emit(ctx, 1, 0);	/* 3ff */
 	}
 	/* XXX: the following block could belong either to unk1cxx, or
 	 * to STRMOUT. Rather hard to tell. */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 0x25, 0);
 	else
 		xf_emit(ctx, 0x3b, 0);
@@ -2343,18 +2324,18 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 1, 0x102);		/* 0000ffff STRMOUT_BUFFER_CTRL */
 	xf_emit(ctx, 1, 0);		/* ffffffff STRMOUT_PRIMITIVE_COUNT */
 	xf_emit(ctx, 4, 4);		/* 000000ff STRMOUT_NUM_ATTRIBS */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 4, 0);	/* ffffffff UNK1A8C */
 		xf_emit(ctx, 4, 0);	/* ffffffff UNK1780 */
 	}
 	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff);	/* 000003ff tesla UNK0D68 */
 	else
 		xf_emit(ctx, 1, 0x7ff);	/* 000007ff tesla UNK0D68 */
@@ -2365,7 +2346,7 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 4, 0);		/* 000000ff STRMOUT_ADDRESS_HIGH */
 	xf_emit(ctx, 4, 0);		/* ffffffff STRMOUT_ADDRESS_LOW */
 	xf_emit(ctx, 4, 4);		/* 000000ff STRMOUT_NUM_ATTRIBS */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 4, 0);	/* ffffffff UNK1A8C */
 		xf_emit(ctx, 4, 0);	/* ffffffff UNK1780 */
 	}
@@ -2385,12 +2366,12 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0D64 */
 	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0DF4 */
 	xf_emit(ctx, 1, 0);		/* 00000007 */
 	xf_emit(ctx, 1, 0);		/* 000003ff */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0x11);	/* 000000ff tesla UNK1968 */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
 }
@@ -2398,7 +2379,7 @@ nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* SEEK */
 	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_QUERY */
 	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
@@ -2416,7 +2397,7 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 eng2d UNK260 */
 	xf_emit(ctx, 1, 0);		/* ff/3ff */
 	xf_emit(ctx, 1, 0);		/* 00000007 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0x11);	/* 000000ff tesla UNK1968 */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
 }
@@ -2424,11 +2405,11 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int magic2;
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		magic2 = 0x00003e60;
-	} else if (!IS_NVA3F(dev_priv->chipset)) {
+	} else if (!IS_NVA3F(device->chipset)) {
 		magic2 = 0x001ffe67;
 	} else {
 		magic2 = 0x00087e67;
@@ -2446,14 +2427,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000007 DEPTH_TEST_FUNC */
 	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
 	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_FRONT_FUNC_FUNC */
 	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_MASK */
 	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
 	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
 	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
-	if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+	if (device->chipset >= 0xa0 && !IS_NVAAF(device->chipset))
 		xf_emit(ctx, 1, 0x15);	/* 000000ff */
 	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
 	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK15B4 */
@@ -2462,14 +2443,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
 	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
-	if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+	if (device->chipset == 0x86 || device->chipset == 0x92 || device->chipset == 0x98 || device->chipset >= 0xa0) {
 		xf_emit(ctx, 3, 0);	/* ff, ffffffff, ffffffff */
 		xf_emit(ctx, 1, 4);	/* 7 */
 		xf_emit(ctx, 1, 0x400);	/* fffffff */
 		xf_emit(ctx, 1, 0x300);	/* ffff */
 		xf_emit(ctx, 1, 0x1001);	/* 1fff */
-		if (dev_priv->chipset != 0xa0) {
-			if (IS_NVA3F(dev_priv->chipset))
+		if (device->chipset != 0xa0) {
+			if (IS_NVA3F(device->chipset))
 				xf_emit(ctx, 1, 0);	/* 0000000f UNK15C8 */
 			else
 				xf_emit(ctx, 1, 0x15);	/* ff */
@@ -2547,7 +2528,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0);		/* ffffffff CLEAR_DEPTH */
 	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK19CC */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 2, 0);
 		xf_emit(ctx, 1, 0x1001);
 		xf_emit(ctx, 0xb, 0);
@@ -2564,7 +2545,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 7, 0);		/* 0000000f COLOR_MASK */
 	xf_emit(ctx, 1, 0x11);		/* 3f/7f */
 	xf_emit(ctx, 1, 0);		/* 00000001 LOGIC_OP_ENABLE */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 0);	/* 0000000f LOGIC_OP */
 		xf_emit(ctx, 1, 0);	/* 000000ff */
 	}
@@ -2581,7 +2562,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
 	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK12E4 */
 		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
 		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2600,7 +2581,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 		xf_emit(ctx, 1, 0);	/* 00000001 */
 		xf_emit(ctx, 1, 0);	/* 000003ff */
-	} else if (dev_priv->chipset >= 0xa0) {
+	} else if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 2, 0);	/* 00000001 */
 		xf_emit(ctx, 1, 0);	/* 00000007 */
 		xf_emit(ctx, 1, 0);	/* 00000003 */
@@ -2614,7 +2595,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 4, 0);		/* ffffffff CLEAR_COLOR */
 	xf_emit(ctx, 4, 0);		/* ffffffff BLEND_COLOR A R G B */
 	xf_emit(ctx, 1, 0);		/* 00000fff eng2d UNK2B0 */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 2, 0);	/* 00000001 */
 	xf_emit(ctx, 1, 0);		/* 000003ff */
 	xf_emit(ctx, 8, 0);		/* 00000001 BLEND_ENABLE */
@@ -2628,9 +2609,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 UNK19C0 */
 	xf_emit(ctx, 1, 0);		/* 00000001 LOGIC_OP_ENABLE */
 	xf_emit(ctx, 1, 0);		/* 0000000f LOGIC_OP */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0);	/* 00000001 UNK12E4? NVA3+ only? */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 8, 1);	/* 00000001 IBLEND_UNK00 */
 		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
 		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_RGB */
@@ -2659,9 +2640,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int magic3;
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 		magic3 = 0x1000;
 		break;
@@ -2681,16 +2662,16 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
 	xf_emit(ctx, 1, 0);		/* 111/113[NVA0+] */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x1f, 0);	/* ffffffff */
-	else if (dev_priv->chipset >= 0xa0)
+	else if (device->chipset >= 0xa0)
 		xf_emit(ctx, 0x0f, 0);	/* ffffffff */
 	else
 		xf_emit(ctx, 0x10, 0);	/* fffffff VP_RESULT_MAP_1 up */
 	xf_emit(ctx, 2, 0);		/* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
 	xf_emit(ctx, 1, 4);		/* 7f/ff VP_REG_ALLOC_RESULT */
 	xf_emit(ctx, 1, 4);		/* 7f/ff VP_RESULT_MAP_SIZE */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0x03020100);	/* ffffffff */
 	else
 		xf_emit(ctx, 1, 0x00608080);	/* fffffff VP_RESULT_MAP_0 */
@@ -2733,11 +2714,11 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
 	xf_emit(ctx, 1, 0);		/* 111/113 */
-	if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
+	if (device->chipset == 0x94 || device->chipset == 0x96)
 		xf_emit(ctx, 0x1020, 0);	/* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
-	else if (dev_priv->chipset < 0xa0)
+	else if (device->chipset < 0xa0)
 		xf_emit(ctx, 0xa20, 0);	/* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
-	else if (!IS_NVA3F(dev_priv->chipset))
+	else if (!IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x210, 0);	/* ffffffff */
 	else
 		xf_emit(ctx, 0x410, 0);	/* ffffffff */
@@ -2751,12 +2732,12 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int magic1, magic2;
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		magic1 = 0x3ff;
 		magic2 = 0x00003e60;
-	} else if (!IS_NVA3F(dev_priv->chipset)) {
+	} else if (!IS_NVA3F(device->chipset)) {
 		magic1 = 0x7ff;
 		magic2 = 0x001ffe67;
 	} else {
@@ -2766,7 +2747,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000007 ALPHA_TEST_FUNC */
 	xf_emit(ctx, 1, 0);		/* ffffffff ALPHA_TEST_REF */
 	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);	/* 0000000f UNK16A0 */
 	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
 	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
@@ -2800,11 +2781,11 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1);		/* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
 	xf_emit(ctx, 1, 0);		/* 00000007 ALPHA_TEST_FUNC */
 	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK16B4 */
 		xf_emit(ctx, 1, 0);	/* 00000003 */
 		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1298 */
-	} else if (dev_priv->chipset >= 0xa0) {
+	} else if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK16B4 */
 		xf_emit(ctx, 1, 0);	/* 00000003 */
 	} else {
@@ -2818,7 +2799,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_RGB */
 	xf_emit(ctx, 1, 1);		/* 00000007 BLEND_EQUATION_RGB */
 	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_RGB */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0);	/* 00000001 UNK12E4 */
 		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
 		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2846,7 +2827,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0xcf);		/* 000000ff SIFC_FORMAT */
 	xf_emit(ctx, 1, 0xcf);		/* 000000ff DRAW_COLOR_FORMAT */
 	xf_emit(ctx, 1, 0xcf);		/* 000000ff SRC_FORMAT */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
 	xf_emit(ctx, 1, 0);		/* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
@@ -2870,9 +2851,9 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
 	xf_emit(ctx, 1, 0x11);		/* 3f/7f DST_FORMAT */
 	xf_emit(ctx, 1, 1);		/* 00000001 DST_LINEAR */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0);	/* ff */
 	else
 		xf_emit(ctx, 3, 0);	/* 1, 7, 3ff */
@@ -2907,7 +2888,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0);		/* 00000007 */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 	xf_emit(ctx, 8, 0);		/* 0000ffff DMA_COLOR */
 	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_GLOBAL */
@@ -2945,7 +2926,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
 	xf_emit(ctx, 1, 0);		/* 00000003 UNK0F90 */
 	xf_emit(ctx, 1, 0);		/* 00000007 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
 	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
@@ -2974,7 +2955,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x1001);	/* 00001fff ZETA_ARRAY_MODE */
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
 	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0);	/* 00000001 */
 	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
 	xf_emit(ctx, 1, 0x11);		/* 3f/7f RT_FORMAT */
@@ -2988,14 +2969,14 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 FRAMEBUFFER_SRGB */
 	xf_emit(ctx, 1, 0);		/* 7 */
 	xf_emit(ctx, 1, 0);		/* 00000001 LOGIC_OP_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0);	/* 00000001 UNK1140 */
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 	}
 	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
 	xf_emit(ctx, 1, 0);		/* 00000001 UNK1534 */
 	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0x0fac6881);	/* fffffff */
 	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
 	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
@@ -3012,12 +2993,12 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
 	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
 	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 		xf_emit(ctx, 1, 0);	/* 0000000f tesla UNK15C8 */
 	}
 	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 3, 0);		/* 7/f, 1, ffff0ff3 */
 		xf_emit(ctx, 1, 0xfac6881);	/* fffffff */
 		xf_emit(ctx, 4, 0);		/* 1, 1, 1, 3ff */
@@ -3027,7 +3008,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 		xf_emit(ctx, 2, 0);		/* 7, f */
 		xf_emit(ctx, 1, 1);		/* 1 */
 		xf_emit(ctx, 1, 0);		/* 7/f */
-		if (IS_NVA3F(dev_priv->chipset))
+		if (IS_NVA3F(device->chipset))
 			xf_emit(ctx, 0x9, 0);	/* 1 */
 		else
 			xf_emit(ctx, 0x8, 0);	/* 1 */
@@ -3041,7 +3022,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 		xf_emit(ctx, 1, 0x11);		/* 7f */
 		xf_emit(ctx, 1, 1);		/* 1 */
 		xf_emit(ctx, 5, 0);		/* 1, 7, 3ff, 3, 7 */
-		if (IS_NVA3F(dev_priv->chipset)) {
+		if (IS_NVA3F(device->chipset)) {
 			xf_emit(ctx, 1, 0);	/* 00000001 UNK1140 */
 			xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 		}
@@ -3051,15 +3032,15 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 2, 0);		/* 1 LINKED_TSC. yes, 2. */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0);	/* 3 */
 	xf_emit(ctx, 1, 1);		/* 1ffff BLIT_DU_DX_INT */
 	xf_emit(ctx, 1, 0);		/* fffff BLIT_DU_DX_FRACT */
 	xf_emit(ctx, 1, 1);		/* 1ffff BLIT_DV_DY_INT */
 	xf_emit(ctx, 1, 0);		/* fffff BLIT_DV_DY_FRACT */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0);	/* 3 BLIT_CONTROL */
 	else
 		xf_emit(ctx, 2, 0);	/* 3ff, 1 */
@@ -3071,13 +3052,13 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x10100);	/* ffffffff SRC_TIC_5 */
 	xf_emit(ctx, 1, 0x02800000);	/* ffffffff SRC_TIC_6 */
 	xf_emit(ctx, 1, 0);		/* ffffffff SRC_TIC_7 */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		xf_emit(ctx, 1, 0);	/* 00000001 turing UNK358 */
 		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A34? */
 		xf_emit(ctx, 1, 0);	/* 00000003 turing UNK37C tesla UNK1690 */
 		xf_emit(ctx, 1, 0);	/* 00000003 BLIT_CONTROL */
 		xf_emit(ctx, 1, 0);	/* 00000001 turing UNK32C tesla UNK0F94 */
-	} else if (!IS_NVAAF(dev_priv->chipset)) {
+	} else if (!IS_NVAAF(device->chipset)) {
 		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A34? */
 		xf_emit(ctx, 1, 0);	/* 00000003 */
 		xf_emit(ctx, 1, 0);	/* 000003ff */
@@ -3097,7 +3078,7 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 1, 0);		/* 00000001 UNK1534 */
 	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
 	xf_emit(ctx, 2, 0);		/* 7, ffff0ff3 */
@@ -3109,7 +3090,7 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
 	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
 	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK0F98 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1668 */
 	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
@@ -3136,8 +3117,8 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-	if (dev_priv->chipset < 0xa0) {
+	struct nouveau_device *device = ctx->device;
+	if (device->chipset < 0xa0) {
 		nv50_graph_construct_xfer_unk84xx(ctx);
 		nv50_graph_construct_xfer_tprop(ctx);
 		nv50_graph_construct_xfer_tex(ctx);
@@ -3153,9 +3134,9 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i, mpcnt = 2;
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 		case 0x98:
 		case 0xaa:
 			mpcnt = 1;
@@ -3182,34 +3163,34 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 		xf_emit(ctx, 1, 0x80);		/* ffffffff tesla UNK1404 */
 		xf_emit(ctx, 1, 0x80007004);	/* ffffffff tesla UNK12B0 */
 		xf_emit(ctx, 1, 0x04000400);	/* ffffffff */
-		if (dev_priv->chipset >= 0xa0)
+		if (device->chipset >= 0xa0)
 			xf_emit(ctx, 1, 0xc0);	/* 00007fff tesla UNK152C */
 		xf_emit(ctx, 1, 0x1000);	/* 0000ffff tesla UNK0D60 */
 		xf_emit(ctx, 1, 0);		/* ff/3ff */
 		xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
-		if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
+		if (device->chipset == 0x86 || device->chipset == 0x98 || device->chipset == 0xa8 || IS_NVAAF(device->chipset)) {
 			xf_emit(ctx, 1, 0xe00);		/* 7fff */
 			xf_emit(ctx, 1, 0x1e00);	/* 7fff */
 		}
 		xf_emit(ctx, 1, 1);		/* 000000ff VP_REG_ALLOC_TEMP */
 		xf_emit(ctx, 1, 0);		/* 00000001 LINKED_TSC */
 		xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
-		if (dev_priv->chipset == 0x50)
+		if (device->chipset == 0x50)
 			xf_emit(ctx, 2, 0x1000);	/* 7fff tesla UNK141C */
 		xf_emit(ctx, 1, 1);		/* 000000ff GP_REG_ALLOC_TEMP */
 		xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
 		xf_emit(ctx, 1, 4);		/* 000000ff FP_REG_ALLOC_TEMP */
 		xf_emit(ctx, 1, 2);		/* 00000003 REG_MODE */
-		if (IS_NVAAF(dev_priv->chipset))
+		if (IS_NVAAF(device->chipset))
 			xf_emit(ctx, 0xb, 0);	/* RO */
-		else if (dev_priv->chipset >= 0xa0)
+		else if (device->chipset >= 0xa0)
 			xf_emit(ctx, 0xc, 0);	/* RO */
 		else
 			xf_emit(ctx, 0xa, 0);	/* RO */
 	}
 	xf_emit(ctx, 1, 0x08100c12);		/* 1fffffff FP_INTERPOLANT_CTRL */
 	xf_emit(ctx, 1, 0);			/* ff/3ff */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 1, 0x1fe21);	/* 0003ffff tesla UNK0FAC */
 	}
 	xf_emit(ctx, 3, 0);			/* 7fff, 0, 0 */
@@ -3223,7 +3204,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);			/* ffffffff SHARED_SIZE */
 	xf_emit(ctx, 1, 0x1fe21);		/* 1ffff/3ffff[NVA0+] tesla UNk0FAC */
 	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A34 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1);		/* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0);			/* ff/3ff */
 	xf_emit(ctx, 1, 0);			/* 1 LINKED_TSC */
@@ -3238,7 +3219,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);			/* 00000007 */
 	xf_emit(ctx, 1, 0xfac6881);		/* 0fffffff RT_CONTROL */
 	xf_emit(ctx, 1, 0);			/* 00000003 MULTISAMPLE_CTRL */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 3);		/* 00000003 tesla UNK16B4 */
 	xf_emit(ctx, 1, 0);			/* 00000001 ALPHA_TEST_ENABLE */
 	xf_emit(ctx, 1, 0);			/* 00000007 ALPHA_TEST_FUNC */
@@ -3253,7 +3234,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1);			/* 0000001f BLEND_FUNC_DST_ALPHA */
 	xf_emit(ctx, 1, 1);			/* 00000007 BLEND_EQUATION_ALPHA */
 	xf_emit(ctx, 1, 1);			/* 00000001 UNK133C */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0);		/* 00000001 UNK12E4 */
 		xf_emit(ctx, 8, 2);		/* 0000001f IBLEND_FUNC_SRC_RGB */
 		xf_emit(ctx, 8, 1);		/* 0000001f IBLEND_FUNC_DST_RGB */
@@ -3268,11 +3249,11 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);			/* 00000003 tesla UNK0F90 */
 	xf_emit(ctx, 1, 4);			/* 000000ff FP_RESULT_COUNT */
 	/* XXX: demagic this part some day */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 0x3a0, 0);
-	else if (dev_priv->chipset < 0x94)
+	else if (device->chipset < 0x94)
 		xf_emit(ctx, 0x3a2, 0);
-	else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+	else if (device->chipset == 0x98 || device->chipset == 0xaa)
 		xf_emit(ctx, 0x39f, 0);
 	else
 		xf_emit(ctx, 0x3a3, 0);
@@ -3285,15 +3266,15 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
-	uint32_t offset;
-	uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+	u32 offset;
+	u32 units = nv_rd32 (ctx->device, 0x1540);
 	int size = 0;
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
 
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		for (i = 0; i < 8; i++) {
 			ctx->ctxvals_pos = offset + i;
 			/* that little bugger belongs to csched. No idea
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
new file mode 100644
index 000000000000..0b7951a85943
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -0,0 +1,3039 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+void
+nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
+{
+	nv_wr32(priv, 0x400204, data);
+	nv_wr32(priv, 0x400200, icmd);
+	while (nv_rd32(priv, 0x400700) & 2) {}
+}
+
+int
+nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nouveau_object *parent = nv_object(priv);
+	struct nouveau_gpuobj *chan;
+	u32 size = (0x80000 + priv->size + 4095) & ~4095;
+	int ret, i;
+
+	/* allocate memory for a "channel", which we'll use to generate
+	 * the default context values
+	 */
+	ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
+	chan = info->chan;
+	if (ret) {
+		nv_error(priv, "failed to allocate channel memory, %d\n", ret);
+		return ret;
+	}
+
+	/* PGD pointer */
+	nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
+	nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
+	nv_wo32(chan, 0x0208, 0xffffffff);
+	nv_wo32(chan, 0x020c, 0x000000ff);
+
+	/* PGT[0] pointer */
+	nv_wo32(chan, 0x1000, 0x00000000);
+	nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+
+	/* identity-map the whole "channel" into its own vm */
+	for (i = 0; i < size / 4096; i++) {
+		u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
+		nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+		nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+	}
+
+	/* context pointer (virt) */
+	nv_wo32(chan, 0x0210, 0x00080004);
+	nv_wo32(chan, 0x0214, 0x00000000);
+
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
+	nv_wr32(priv, 0x100cbc, 0x80000001);
+	nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
+
+	/* setup default state for mmio list construction */
+	info->data = priv->mmio_data;
+	info->mmio = priv->mmio_list;
+	info->addr = 0x2000 + (i * 8);
+	info->priv = priv;
+	info->buffer_nr = 0;
+
+	if (priv->firmware) {
+		nv_wr32(priv, 0x409840, 0x00000030);
+		nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
+		nv_wr32(priv, 0x409504, 0x00000003);
+		if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
+			nv_error(priv, "load_ctx timeout\n");
+
+		nv_wo32(chan, 0x8001c, 1);
+		nv_wo32(chan, 0x80020, 0);
+		nv_wo32(chan, 0x80028, 0);
+		nv_wo32(chan, 0x8002c, 0);
+		bar->flush(bar);
+		return 0;
+	}
+
+	/* HUB_FUC(SET_CHAN) */
+	nv_wr32(priv, 0x409840, 0x80000000);
+	nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
+	nv_wr32(priv, 0x409504, 0x00000001);
+	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+		nv_error(priv, "HUB_SET_CHAN timeout\n");
+		nvc0_graph_ctxctl_debug(priv);
+		nouveau_gpuobj_ref(NULL, &info->chan);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+void
+nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
+{
+	info->buffer[info->buffer_nr]  = info->addr;
+	info->buffer[info->buffer_nr] +=  (align - 1);
+	info->buffer[info->buffer_nr] &= ~(align - 1);
+	info->addr = info->buffer[info->buffer_nr++] + size;
+
+	info->data->size = size;
+	info->data->align = align;
+	info->data->access = access;
+	info->data++;
+}
+
+void
+nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
+{
+	struct nvc0_graph_priv *priv = info->priv;
+
+	info->mmio->addr = addr;
+	info->mmio->data = data;
+	info->mmio->shift = shift;
+	info->mmio->buffer = buf;
+	info->mmio++;
+
+	if (shift)
+		data |= info->buffer[buf] >> shift;
+	nv_wr32(priv, addr, data);
+}
+
+int
+nvc0_grctx_fini(struct nvc0_grctx *info)
+{
+	struct nvc0_graph_priv *priv = info->priv;
+	int i;
+
+	/* trigger a context unload by unsetting the "next channel valid" bit
+	 * and faking a context switch interrupt
+	 */
+	nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
+	nv_wr32(priv, 0x409000, 0x00000100);
+	if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
+		nv_error(priv, "grctx template channel unload timeout\n");
+		return -EBUSY;
+	}
+
+	priv->data = kmalloc(priv->size, GFP_KERNEL);
+	if (priv->data) {
+		for (i = 0; i < priv->size; i += 4)
+			priv->data[i / 4] = nv_ro32(info->chan, 0x80000 + i);
+	}
+
+	nouveau_gpuobj_ref(NULL, &info->chan);
+	return priv->data ? 0 : -ENOMEM;
+}
+
+static void
+nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv)
+{
+	u32 fermi = nvc0_graph_class(priv);
+	u32 mthd;
+
+	nv_mthd(priv, 0x9097, 0x0800, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0840, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0880, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0900, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0940, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0980, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0804, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0844, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0884, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0904, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0944, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0984, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0808, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0848, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0888, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x08c8, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0908, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0948, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0988, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x09c8, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x080c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x084c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x088c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x08cc, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x090c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x094c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x098c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x09cc, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x0810, 0x000000cf);
+	nv_mthd(priv, 0x9097, 0x0850, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0890, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0910, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0950, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0990, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0814, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0854, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0894, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x08d4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0914, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0954, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0994, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x09d4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0818, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0858, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0898, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x08d8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0918, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0958, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0998, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x09d8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x081c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x085c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x089c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x091c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x095c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x099c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0820, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0860, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0920, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0960, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2700, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2720, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2740, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2760, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2780, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2704, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2724, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2744, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2764, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2784, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2708, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2728, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2748, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2768, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2788, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x270c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x272c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x274c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x276c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x278c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2710, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2730, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2750, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2770, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2790, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x27b0, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x27d0, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x27f0, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2714, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2734, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2754, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2774, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2794, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x27b4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x27d4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x27f4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x1c00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ca0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ce0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cf0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ca4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cb4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ce4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cf4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c38, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c78, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c98, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ca8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cb8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cd8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ce8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cf8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c1c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c3c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c7c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c9c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cbc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ccc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cdc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cfc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1da0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1db0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1de0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1df0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1da4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1db4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1de4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1df4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d38, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d78, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d98, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1da8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1db8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dd8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1de8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1df8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d1c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d3c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d7c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d9c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dbc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dcc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ddc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dfc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f38, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f78, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f1c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f3c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f7c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f98, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fa0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fa8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fb8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fd8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fe0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fe8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ff0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ff8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f9c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fa4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fb4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fbc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fcc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fdc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fe4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ff4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ffc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2200, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2210, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2220, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2230, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2240, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2000, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2040, 0x00000011);
+	nv_mthd(priv, 0x9097, 0x2080, 0x00000020);
+	nv_mthd(priv, 0x9097, 0x20c0, 0x00000030);
+	nv_mthd(priv, 0x9097, 0x2100, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2140, 0x00000051);
+	nv_mthd(priv, 0x9097, 0x200c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x204c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x208c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x20cc, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x210c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x214c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x2010, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2050, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2090, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x20d0, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x2110, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x2150, 0x00000004);
+	nv_mthd(priv, 0x9097, 0x0380, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0384, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0388, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x038c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0700, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0710, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0720, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0730, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0704, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0714, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0724, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0734, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0708, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0718, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0728, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0738, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2800, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2804, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2808, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x280c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2810, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2814, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2818, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x281c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2820, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2824, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2828, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x282c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2830, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2834, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2838, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x283c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2840, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2844, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2848, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x284c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2850, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2854, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2858, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x285c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2860, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2864, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2868, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x286c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2870, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2874, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2878, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x287c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2880, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2884, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2888, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x288c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2890, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2894, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2898, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x289c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28b0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28b4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28b8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28d4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28d8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28f0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28f4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28f8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x28fc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2900, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2904, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2908, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x290c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2910, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2914, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2918, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x291c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2920, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2924, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2928, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x292c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2930, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2934, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2938, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x293c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2940, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2944, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2948, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x294c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2950, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2954, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2958, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x295c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2960, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2964, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2968, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x296c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2970, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2974, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2978, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x297c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2980, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2984, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2988, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x298c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2990, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2994, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2998, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x299c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29b0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29b4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29b8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29d4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29d8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29f0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29f4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29f8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x29fc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aa0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ac0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ae0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ba0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0be0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aa4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ac4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ae4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ba4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0be4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aa8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ac8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ae8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ba8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0be8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0acc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bcc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ab0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ad0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0af0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bf0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ab4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ad4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0af4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bb4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bf4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ca0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ce0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cf0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ca4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cb4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ce4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cf4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c38, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c78, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c98, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ca8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cb8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cd8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ce8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cf8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c0c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c1c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c2c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c3c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c4c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c5c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c6c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c7c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c8c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c9c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cac, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cbc, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0ccc, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cdc, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cec, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cfc, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0d00, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d08, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d10, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d18, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d20, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d28, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d30, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d38, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d04, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d0c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d14, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d1c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d24, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d2c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d34, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d3c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ea0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0eb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ec0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ed0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ee0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ef0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e04, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e14, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e24, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e34, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e44, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e54, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e64, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e74, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e84, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e94, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ea4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0eb4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ec4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ed4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ee4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ef4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e08, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e18, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e28, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e38, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e48, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e58, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e68, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e78, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e88, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e98, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ea8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0eb8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ec8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ed8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ee8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ef8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1e00, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e20, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e40, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e60, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e80, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ea0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ec0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ee0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e04, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e24, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e44, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e64, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e84, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ea4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ec4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ee4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e08, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e28, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e48, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e68, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e88, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ea8, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ec8, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ee8, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e0c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e2c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e4c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e6c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e8c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1eac, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ecc, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1eec, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e10, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e30, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e50, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e70, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e90, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1eb0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ed0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ef0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e14, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e34, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e54, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e74, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e94, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1eb4, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ed4, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ef4, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e18, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e38, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e58, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e78, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e98, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1eb8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ed8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ef8, 0x00000001);
+	if (fermi == 0x9097) {
+		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+			nv_mthd(priv, 0x9097, mthd, 0x00000000);
+	}
+	nv_mthd(priv, 0x9097, 0x030c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1944, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1514, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d68, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x121c, 0x0fac6881);
+	nv_mthd(priv, 0x9097, 0x0fac, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1538, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0fe0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fe4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fe8, 0x00000014);
+	nv_mthd(priv, 0x9097, 0x0fec, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0ff0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x179c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1228, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x122c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x1230, 0x00010001);
+	nv_mthd(priv, 0x9097, 0x07f8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15b4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x15cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1534, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x153c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x16b4, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x0fbc, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x0fc0, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x0fc4, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x0fc8, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x0df8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dfc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1948, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1970, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x161c, 0x000009f0);
+	nv_mthd(priv, 0x9097, 0x0dcc, 0x00000010);
+	nv_mthd(priv, 0x9097, 0x163c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1160, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1164, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1168, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x116c, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1170, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1174, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1178, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x117c, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1180, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1184, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1188, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x118c, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1190, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1194, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1198, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x119c, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11a0, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11a4, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11a8, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11ac, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11b0, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11b4, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11b8, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11bc, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11c0, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11c4, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11c8, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11cc, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11d0, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11d4, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11d8, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x11dc, 0x25e00040);
+	nv_mthd(priv, 0x9097, 0x1880, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1884, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1888, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x188c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1890, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1894, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1898, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x189c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18b0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18b4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18b8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18d4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18d8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18f0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18f4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18f8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x18fc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17d0, 0x000000ff);
+	nv_mthd(priv, 0x9097, 0x17d4, 0xffffffff);
+	nv_mthd(priv, 0x9097, 0x17d8, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x17dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15f4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15f8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1434, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1438, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dec, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x13a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1318, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1644, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0748, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0de8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1648, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1120, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1124, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1128, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x112c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1118, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x164c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1658, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1910, 0x00000290);
+	nv_mthd(priv, 0x9097, 0x1518, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x165c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1520, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1604, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1570, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x13b0, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x13b4, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x020c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1670, 0x30201000);
+	nv_mthd(priv, 0x9097, 0x1674, 0x70605040);
+	nv_mthd(priv, 0x9097, 0x1678, 0xb8a89888);
+	nv_mthd(priv, 0x9097, 0x167c, 0xf8e8d8c8);
+	nv_mthd(priv, 0x9097, 0x166c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1680, 0x00ffff00);
+	nv_mthd(priv, 0x9097, 0x12d0, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x12d4, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1684, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1688, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dac, 0x00001b02);
+	nv_mthd(priv, 0x9097, 0x0db0, 0x00001b02);
+	nv_mthd(priv, 0x9097, 0x0db4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x168c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x156c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x187c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1110, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0dc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1234, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1690, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12ac, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x02c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0790, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0794, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0798, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x079c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x07a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x077c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1000, 0x00000010);
+	nv_mthd(priv, 0x9097, 0x10fc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1290, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0218, 0x00000010);
+	nv_mthd(priv, 0x9097, 0x12d8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12dc, 0x00000010);
+	nv_mthd(priv, 0x9097, 0x0d94, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x155c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1560, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1564, 0x00001fff);
+	nv_mthd(priv, 0x9097, 0x1574, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1578, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x157c, 0x003fffff);
+	nv_mthd(priv, 0x9097, 0x1354, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1664, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1610, 0x00000012);
+	nv_mthd(priv, 0x9097, 0x1608, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x160c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x162c, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x0210, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0320, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0324, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0328, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x032c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0330, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0334, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0338, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0750, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0760, 0x39291909);
+	nv_mthd(priv, 0x9097, 0x0764, 0x79695949);
+	nv_mthd(priv, 0x9097, 0x0768, 0xb9a99989);
+	nv_mthd(priv, 0x9097, 0x076c, 0xf9e9d9c9);
+	nv_mthd(priv, 0x9097, 0x0770, 0x30201000);
+	nv_mthd(priv, 0x9097, 0x0774, 0x70605040);
+	nv_mthd(priv, 0x9097, 0x0778, 0x00009080);
+	nv_mthd(priv, 0x9097, 0x0780, 0x39291909);
+	nv_mthd(priv, 0x9097, 0x0784, 0x79695949);
+	nv_mthd(priv, 0x9097, 0x0788, 0xb9a99989);
+	nv_mthd(priv, 0x9097, 0x078c, 0xf9e9d9c9);
+	nv_mthd(priv, 0x9097, 0x07d0, 0x30201000);
+	nv_mthd(priv, 0x9097, 0x07d4, 0x70605040);
+	nv_mthd(priv, 0x9097, 0x07d8, 0x00009080);
+	nv_mthd(priv, 0x9097, 0x037c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0740, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0744, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2600, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1918, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x191c, 0x00000900);
+	nv_mthd(priv, 0x9097, 0x1920, 0x00000405);
+	nv_mthd(priv, 0x9097, 0x1308, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1924, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x13ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x192c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x193c, 0x00002c1c);
+	nv_mthd(priv, 0x9097, 0x0d7c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x02c0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1510, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1940, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ff4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ff8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x194c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1950, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1968, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1590, 0x0000003f);
+	nv_mthd(priv, 0x9097, 0x07e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x07ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x07f0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x07f4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x196c, 0x00000011);
+	nv_mthd(priv, 0x9097, 0x197c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fcc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x02d8, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x1980, 0x00000080);
+	nv_mthd(priv, 0x9097, 0x1504, 0x00000080);
+	nv_mthd(priv, 0x9097, 0x1984, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0300, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x13a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1310, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1314, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1380, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1384, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1388, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x138c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1390, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1394, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x139c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1398, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1594, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1598, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x159c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x15a0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x15a4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0f54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f9c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fa0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x130c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1360, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1364, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1368, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x136c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1370, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1374, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1378, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x137c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x133c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1340, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1344, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1348, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x134c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1350, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1358, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x12e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x131c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1320, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1324, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1328, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1140, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19c8, 0x00001500);
+	nv_mthd(priv, 0x9097, 0x135c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19e0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19e4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19e8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19ec, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19f0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19f4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19f8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19fc, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19cc, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x15b8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a00, 0x00001111);
+	nv_mthd(priv, 0x9097, 0x1a04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a1c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d6c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d70, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x10f8, 0x00001010);
+	nv_mthd(priv, 0x9097, 0x0d80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0da0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1508, 0x80000000);
+	nv_mthd(priv, 0x9097, 0x150c, 0x40000000);
+	nv_mthd(priv, 0x9097, 0x1668, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0318, 0x00000008);
+	nv_mthd(priv, 0x9097, 0x031c, 0x00000008);
+	nv_mthd(priv, 0x9097, 0x0d9c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x07dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x074c, 0x00000055);
+	nv_mthd(priv, 0x9097, 0x1420, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x17bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17c4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1008, 0x00000008);
+	nv_mthd(priv, 0x9097, 0x100c, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x1010, 0x0000012c);
+	nv_mthd(priv, 0x9097, 0x0d60, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x075c, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x1018, 0x00000020);
+	nv_mthd(priv, 0x9097, 0x101c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1020, 0x00000020);
+	nv_mthd(priv, 0x9097, 0x1024, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1444, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1448, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x144c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0360, 0x20164010);
+	nv_mthd(priv, 0x9097, 0x0364, 0x00000020);
+	nv_mthd(priv, 0x9097, 0x0368, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0de4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0204, 0x00000006);
+	nv_mthd(priv, 0x9097, 0x0208, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x02cc, 0x003fffff);
+	nv_mthd(priv, 0x9097, 0x02d0, 0x00000c48);
+	nv_mthd(priv, 0x9097, 0x1220, 0x00000005);
+	nv_mthd(priv, 0x9097, 0x0fdc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f98, 0x00300008);
+	nv_mthd(priv, 0x9097, 0x1284, 0x04000080);
+	nv_mthd(priv, 0x9097, 0x1450, 0x00300008);
+	nv_mthd(priv, 0x9097, 0x1454, 0x04000080);
+	nv_mthd(priv, 0x9097, 0x0214, 0x00000000);
+	/* in trace, right after 0x90c0, not here */
+	nv_mthd(priv, 0x9097, 0x3410, 0x80002006);
+}
+
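+/* 0x9197 and 0x9297 are later Fermi 3D class variants; only their
+ * class-specific methods are programmed here
+ */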
+static void
+nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
+{
+	u32 fermi = nvc0_graph_class(priv);
+	u32 mthd;
+
+	if (fermi == 0x9197) {
+		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+			nv_mthd(priv, 0x9197, mthd, 0x00000000);
+	}
+	nv_mthd(priv, 0x9197, 0x02e4, 0x0000b001);
+}
+
+static void
+nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
+{
+	u32 fermi = nvc0_graph_class(priv);
+	u32 mthd;
+
+	if (fermi == 0x9297) {
+		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+			nv_mthd(priv, 0x9297, mthd, 0x00000000);
+	}
+	nv_mthd(priv, 0x9297, 0x036c, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0370, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x07a4, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x07a8, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0374, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0378, 0x00000020);
+}
+
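+/* default method state for the 0x902d (Fermi 2D) class */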
+static void
+nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
+}
+
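+/* default method state for the 0x9039 (M2MF) class */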
+static void
+nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0314, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0320, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0238, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x023c, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0318, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x031c, 0x00000000);
+}
+
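+/* default method state for the 0x90c0 (Fermi compute) class; chipset
+ * 0xd9 programs additional 0x27xx state in the loops below
+ */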
+static void
+nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
+		nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+	}
+	nv_mthd(priv, 0x90c0, 0x270c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x272c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x274c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x276c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x278c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
+	for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
+		nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+		nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+		nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+		nv_mthd(priv, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+	}
+	nv_mthd(priv, 0x90c0, 0x030c, 0x00000001);
+	nv_mthd(priv, 0x90c0, 0x1944, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0758, 0x00000100);
+	nv_mthd(priv, 0x90c0, 0x02c4, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0790, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0794, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0798, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x079c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x07a0, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x077c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0204, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0208, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x020c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0214, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x024c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0d94, 0x00000001);
+	nv_mthd(priv, 0x90c0, 0x1608, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x160c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x1664, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	nv_wr32(priv, 0x404004, 0x00000000);
+	nv_wr32(priv, 0x404008, 0x00000000);
+	nv_wr32(priv, 0x40400c, 0x00000000);
+	nv_wr32(priv, 0x404010, 0x00000000);
+	nv_wr32(priv, 0x404014, 0x00000000);
+	nv_wr32(priv, 0x404018, 0x00000000);
+	nv_wr32(priv, 0x40401c, 0x00000000);
+	nv_wr32(priv, 0x404020, 0x00000000);
+	nv_wr32(priv, 0x404024, 0x00000000);
+	nv_wr32(priv, 0x404028, 0x00000000);
+	nv_wr32(priv, 0x40402c, 0x00000000);
+	nv_wr32(priv, 0x404044, 0x00000000);
+	nv_wr32(priv, 0x404094, 0x00000000);
+	nv_wr32(priv, 0x404098, 0x00000000);
+	nv_wr32(priv, 0x40409c, 0x00000000);
+	nv_wr32(priv, 0x4040a0, 0x00000000);
+	nv_wr32(priv, 0x4040a4, 0x00000000);
+	nv_wr32(priv, 0x4040a8, 0x00000000);
+	nv_wr32(priv, 0x4040ac, 0x00000000);
+	nv_wr32(priv, 0x4040b0, 0x00000000);
+	nv_wr32(priv, 0x4040b4, 0x00000000);
+	nv_wr32(priv, 0x4040b8, 0x00000000);
+	nv_wr32(priv, 0x4040bc, 0x00000000);
+	nv_wr32(priv, 0x4040c0, 0x00000000);
+	nv_wr32(priv, 0x4040c4, 0x00000000);
+	nv_wr32(priv, 0x4040c8, 0xf0000087);
+	nv_wr32(priv, 0x4040d4, 0x00000000);
+	nv_wr32(priv, 0x4040d8, 0x00000000);
+	nv_wr32(priv, 0x4040dc, 0x00000000);
+	nv_wr32(priv, 0x4040e0, 0x00000000);
+	nv_wr32(priv, 0x4040e4, 0x00000000);
+	nv_wr32(priv, 0x4040e8, 0x00001000);
+	nv_wr32(priv, 0x4040f8, 0x00000000);
+	nv_wr32(priv, 0x404130, 0x00000000);
+	nv_wr32(priv, 0x404134, 0x00000000);
+	nv_wr32(priv, 0x404138, 0x20000040);
+	nv_wr32(priv, 0x404150, 0x0000002e);
+	nv_wr32(priv, 0x404154, 0x00000400);
+	nv_wr32(priv, 0x404158, 0x00000200);
+	nv_wr32(priv, 0x404164, 0x00000055);
+	nv_wr32(priv, 0x404168, 0x00000000);
+	nv_wr32(priv, 0x404174, 0x00000000);
+	nv_wr32(priv, 0x404178, 0x00000000);
+	nv_wr32(priv, 0x40417c, 0x00000000);
+	for (i = 0; i < 8; i++)
+		nv_wr32(priv, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
+static void
+nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404404, 0x00000000);
+	nv_wr32(priv, 0x404408, 0x00000000);
+	nv_wr32(priv, 0x40440c, 0x00000000);
+	nv_wr32(priv, 0x404410, 0x00000000);
+	nv_wr32(priv, 0x404414, 0x00000000);
+	nv_wr32(priv, 0x404418, 0x00000000);
+	nv_wr32(priv, 0x40441c, 0x00000000);
+	nv_wr32(priv, 0x404420, 0x00000000);
+	nv_wr32(priv, 0x404424, 0x00000000);
+	nv_wr32(priv, 0x404428, 0x00000000);
+	nv_wr32(priv, 0x40442c, 0x00000000);
+	nv_wr32(priv, 0x404430, 0x00000000);
+	nv_wr32(priv, 0x404434, 0x00000000);
+	nv_wr32(priv, 0x404438, 0x00000000);
+	nv_wr32(priv, 0x404460, 0x00000000);
+	nv_wr32(priv, 0x404464, 0x00000000);
+	nv_wr32(priv, 0x404468, 0x00ffffff);
+	nv_wr32(priv, 0x40446c, 0x00000000);
+	nv_wr32(priv, 0x404480, 0x00000001);
+	nv_wr32(priv, 0x404498, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404604, 0x00000015);
+	nv_wr32(priv, 0x404608, 0x00000000);
+	nv_wr32(priv, 0x40460c, 0x00002e00);
+	nv_wr32(priv, 0x404610, 0x00000100);
+	nv_wr32(priv, 0x404618, 0x00000000);
+	nv_wr32(priv, 0x40461c, 0x00000000);
+	nv_wr32(priv, 0x404620, 0x00000000);
+	nv_wr32(priv, 0x404624, 0x00000000);
+	nv_wr32(priv, 0x404628, 0x00000000);
+	nv_wr32(priv, 0x40462c, 0x00000000);
+	nv_wr32(priv, 0x404630, 0x00000000);
+	nv_wr32(priv, 0x404634, 0x00000000);
+	nv_wr32(priv, 0x404638, 0x00000004);
+	nv_wr32(priv, 0x40463c, 0x00000000);
+	nv_wr32(priv, 0x404640, 0x00000000);
+	nv_wr32(priv, 0x404644, 0x00000000);
+	nv_wr32(priv, 0x404648, 0x00000000);
+	nv_wr32(priv, 0x40464c, 0x00000000);
+	nv_wr32(priv, 0x404650, 0x00000000);
+	nv_wr32(priv, 0x404654, 0x00000000);
+	nv_wr32(priv, 0x404658, 0x00000000);
+	nv_wr32(priv, 0x40465c, 0x007f0100);
+	nv_wr32(priv, 0x404660, 0x00000000);
+	nv_wr32(priv, 0x404664, 0x00000000);
+	nv_wr32(priv, 0x404668, 0x00000000);
+	nv_wr32(priv, 0x40466c, 0x00000000);
+	nv_wr32(priv, 0x404670, 0x00000000);
+	nv_wr32(priv, 0x404674, 0x00000000);
+	nv_wr32(priv, 0x404678, 0x00000000);
+	nv_wr32(priv, 0x40467c, 0x00000002);
+	nv_wr32(priv, 0x404680, 0x00000000);
+	nv_wr32(priv, 0x404684, 0x00000000);
+	nv_wr32(priv, 0x404688, 0x00000000);
+	nv_wr32(priv, 0x40468c, 0x00000000);
+	nv_wr32(priv, 0x404690, 0x00000000);
+	nv_wr32(priv, 0x404694, 0x00000000);
+	nv_wr32(priv, 0x404698, 0x00000000);
+	nv_wr32(priv, 0x40469c, 0x00000000);
+	nv_wr32(priv, 0x4046a0, 0x007f0080);
+	nv_wr32(priv, 0x4046a4, 0x00000000);
+	nv_wr32(priv, 0x4046a8, 0x00000000);
+	nv_wr32(priv, 0x4046ac, 0x00000000);
+	nv_wr32(priv, 0x4046b0, 0x00000000);
+	nv_wr32(priv, 0x4046b4, 0x00000000);
+	nv_wr32(priv, 0x4046b8, 0x00000000);
+	nv_wr32(priv, 0x4046bc, 0x00000000);
+	nv_wr32(priv, 0x4046c0, 0x00000000);
+	nv_wr32(priv, 0x4046c4, 0x00000000);
+	nv_wr32(priv, 0x4046c8, 0x00000000);
+	nv_wr32(priv, 0x4046cc, 0x00000000);
+	nv_wr32(priv, 0x4046d0, 0x00000000);
+	nv_wr32(priv, 0x4046d4, 0x00000000);
+	nv_wr32(priv, 0x4046d8, 0x00000000);
+	nv_wr32(priv, 0x4046dc, 0x00000000);
+	nv_wr32(priv, 0x4046e0, 0x00000000);
+	nv_wr32(priv, 0x4046e4, 0x00000000);
+	nv_wr32(priv, 0x4046e8, 0x00000000);
+	nv_wr32(priv, 0x4046f0, 0x00000000);
+	nv_wr32(priv, 0x4046f4, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404700, 0x00000000);
+	nv_wr32(priv, 0x404704, 0x00000000);
+	nv_wr32(priv, 0x404708, 0x00000000);
+	nv_wr32(priv, 0x40470c, 0x00000000);
+	nv_wr32(priv, 0x404710, 0x00000000);
+	nv_wr32(priv, 0x404714, 0x00000000);
+	nv_wr32(priv, 0x404718, 0x00000000);
+	nv_wr32(priv, 0x40471c, 0x00000000);
+	nv_wr32(priv, 0x404720, 0x00000000);
+	nv_wr32(priv, 0x404724, 0x00000000);
+	nv_wr32(priv, 0x404728, 0x00000000);
+	nv_wr32(priv, 0x40472c, 0x00000000);
+	nv_wr32(priv, 0x404730, 0x00000000);
+	nv_wr32(priv, 0x404734, 0x00000100);
+	nv_wr32(priv, 0x404738, 0x00000000);
+	nv_wr32(priv, 0x40473c, 0x00000000);
+	nv_wr32(priv, 0x404740, 0x00000000);
+	nv_wr32(priv, 0x404744, 0x00000000);
+	nv_wr32(priv, 0x404748, 0x00000000);
+	nv_wr32(priv, 0x40474c, 0x00000000);
+	nv_wr32(priv, 0x404750, 0x00000000);
+	nv_wr32(priv, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
+{
+	if (nv_device(priv)->chipset == 0xd9) {
+		nv_wr32(priv, 0x405800, 0x0f8000bf);
+		nv_wr32(priv, 0x405830, 0x02180218);
+		nv_wr32(priv, 0x405834, 0x08000000);
+	} else
+	if (nv_device(priv)->chipset == 0xc1) {
+		nv_wr32(priv, 0x405800, 0x0f8000bf);
+		nv_wr32(priv, 0x405830, 0x02180218);
+		nv_wr32(priv, 0x405834, 0x00000000);
+	} else {
+		nv_wr32(priv, 0x405800, 0x078000bf);
+		nv_wr32(priv, 0x405830, 0x02180000);
+		nv_wr32(priv, 0x405834, 0x00000000);
+	}
+	nv_wr32(priv, 0x405838, 0x00000000);
+	nv_wr32(priv, 0x405854, 0x00000000);
+	nv_wr32(priv, 0x405870, 0x00000001);
+	nv_wr32(priv, 0x405874, 0x00000001);
+	nv_wr32(priv, 0x405878, 0x00000001);
+	nv_wr32(priv, 0x40587c, 0x00000001);
+	nv_wr32(priv, 0x405a00, 0x00000000);
+	nv_wr32(priv, 0x405a04, 0x00000000);
+	nv_wr32(priv, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x406020, 0x000103c1);
+	nv_wr32(priv, 0x406028, 0x00000001);
+	nv_wr32(priv, 0x40602c, 0x00000001);
+	nv_wr32(priv, 0x406030, 0x00000001);
+	nv_wr32(priv, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x4064a8, 0x00000000);
+	nv_wr32(priv, 0x4064ac, 0x00003fff);
+	nv_wr32(priv, 0x4064b4, 0x00000000);
+	nv_wr32(priv, 0x4064b8, 0x00000000);
+	if (nv_device(priv)->chipset == 0xd9)
+		nv_wr32(priv, 0x4064bc, 0x00000000);
+	if (nv_device(priv)->chipset == 0xc1 ||
+	    nv_device(priv)->chipset == 0xd9) {
+		nv_wr32(priv, 0x4064c0, 0x80140078);
+		nv_wr32(priv, 0x4064c4, 0x0086ffff);
+	}
+}
+
+static void
+nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x407804, 0x00000023);
+	nv_wr32(priv, 0x40780c, 0x0a418820);
+	nv_wr32(priv, 0x407810, 0x062080e6);
+	nv_wr32(priv, 0x407814, 0x020398a4);
+	nv_wr32(priv, 0x407818, 0x0e629062);
+	nv_wr32(priv, 0x40781c, 0x0a418820);
+	nv_wr32(priv, 0x407820, 0x000000e6);
+	nv_wr32(priv, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x408000, 0x00000000);
+	nv_wr32(priv, 0x408004, 0x00000000);
+	nv_wr32(priv, 0x408008, 0x00000018);
+	nv_wr32(priv, 0x40800c, 0x00000000);
+	nv_wr32(priv, 0x408010, 0x00000000);
+	nv_wr32(priv, 0x408014, 0x00000069);
+	nv_wr32(priv, 0x408018, 0xe100e100);
+	nv_wr32(priv, 0x408064, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
+{
+	int chipset = nv_device(priv)->chipset;
+
+	/* ROPC_BROADCAST */
+	nv_wr32(priv, 0x408800, 0x02802a3c);
+	nv_wr32(priv, 0x408804, 0x00000040);
+	if (chipset == 0xd9) {
+		nv_wr32(priv, 0x408808, 0x1043e005);
+		nv_wr32(priv, 0x408900, 0x3080b801);
+		nv_wr32(priv, 0x408904, 0x1043e005);
+		nv_wr32(priv, 0x408908, 0x00c8102f);
+	} else
+	if (chipset == 0xc1) {
+		nv_wr32(priv, 0x408808, 0x1003e005);
+		nv_wr32(priv, 0x408900, 0x3080b801);
+		nv_wr32(priv, 0x408904, 0x62000001);
+		nv_wr32(priv, 0x408908, 0x00c80929);
+	} else {
+		nv_wr32(priv, 0x408808, 0x0003e00d);
+		nv_wr32(priv, 0x408900, 0x3080b801);
+		nv_wr32(priv, 0x408904, 0x02000001);
+		nv_wr32(priv, 0x408908, 0x00c80929);
+	}
+	nv_wr32(priv, 0x40890c, 0x00000000);
+	nv_wr32(priv, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
+{
+	int chipset = nv_device(priv)->chipset;
+	int i;
+
+	/* GPC_BROADCAST */
+	nv_wr32(priv, 0x418380, 0x00000016);
+	nv_wr32(priv, 0x418400, 0x38004e00);
+	nv_wr32(priv, 0x418404, 0x71e0ffff);
+	nv_wr32(priv, 0x418408, 0x00000000);
+	nv_wr32(priv, 0x41840c, 0x00001008);
+	nv_wr32(priv, 0x418410, 0x0fff0fff);
+	nv_wr32(priv, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
+	nv_wr32(priv, 0x418450, 0x00000000);
+	nv_wr32(priv, 0x418454, 0x00000000);
+	nv_wr32(priv, 0x418458, 0x00000000);
+	nv_wr32(priv, 0x41845c, 0x00000000);
+	nv_wr32(priv, 0x418460, 0x00000000);
+	nv_wr32(priv, 0x418464, 0x00000000);
+	nv_wr32(priv, 0x418468, 0x00000001);
+	nv_wr32(priv, 0x41846c, 0x00000000);
+	nv_wr32(priv, 0x418470, 0x00000000);
+	nv_wr32(priv, 0x418600, 0x0000001f);
+	nv_wr32(priv, 0x418684, 0x0000000f);
+	nv_wr32(priv, 0x418700, 0x00000002);
+	nv_wr32(priv, 0x418704, 0x00000080);
+	nv_wr32(priv, 0x418708, 0x00000000);
+	nv_wr32(priv, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
+	nv_wr32(priv, 0x418710, 0x00000000);
+	nv_wr32(priv, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
+	nv_wr32(priv, 0x418808, 0x00000000);
+	nv_wr32(priv, 0x41880c, 0x00000000);
+	nv_wr32(priv, 0x418810, 0x00000000);
+	nv_wr32(priv, 0x418828, 0x00008442);
+	if (chipset == 0xc1 || chipset == 0xd9)
+		nv_wr32(priv, 0x418830, 0x10000001);
+	else
+		nv_wr32(priv, 0x418830, 0x00000001);
+	nv_wr32(priv, 0x4188d8, 0x00000008);
+	nv_wr32(priv, 0x4188e0, 0x01000000);
+	nv_wr32(priv, 0x4188e8, 0x00000000);
+	nv_wr32(priv, 0x4188ec, 0x00000000);
+	nv_wr32(priv, 0x4188f0, 0x00000000);
+	nv_wr32(priv, 0x4188f4, 0x00000000);
+	nv_wr32(priv, 0x4188f8, 0x00000000);
+	if (chipset == 0xd9)
+		nv_wr32(priv, 0x4188fc, 0x20100008);
+	else if (chipset == 0xc1)
+		nv_wr32(priv, 0x4188fc, 0x00100018);
+	else
+		nv_wr32(priv, 0x4188fc, 0x00100000);
+	nv_wr32(priv, 0x41891c, 0x00ff00ff);
+	nv_wr32(priv, 0x418924, 0x00000000);
+	nv_wr32(priv, 0x418928, 0x00ffff00);
+	nv_wr32(priv, 0x41892c, 0x0000ff00);
+	for (i = 0; i < 8; i++) {
+		nv_wr32(priv, 0x418a00 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a04 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a08 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a0c + (i * 0x20), 0x00010000);
+		nv_wr32(priv, 0x418a10 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
+	}
+	nv_wr32(priv, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
+	nv_wr32(priv, 0x418b08, 0x0a418820);
+	nv_wr32(priv, 0x418b0c, 0x062080e6);
+	nv_wr32(priv, 0x418b10, 0x020398a4);
+	nv_wr32(priv, 0x418b14, 0x0e629062);
+	nv_wr32(priv, 0x418b18, 0x0a418820);
+	nv_wr32(priv, 0x418b1c, 0x000000e6);
+	nv_wr32(priv, 0x418bb8, 0x00000103);
+	nv_wr32(priv, 0x418c08, 0x00000001);
+	nv_wr32(priv, 0x418c10, 0x00000000);
+	nv_wr32(priv, 0x418c14, 0x00000000);
+	nv_wr32(priv, 0x418c18, 0x00000000);
+	nv_wr32(priv, 0x418c1c, 0x00000000);
+	nv_wr32(priv, 0x418c20, 0x00000000);
+	nv_wr32(priv, 0x418c24, 0x00000000);
+	nv_wr32(priv, 0x418c28, 0x00000000);
+	nv_wr32(priv, 0x418c2c, 0x00000000);
+	if (chipset == 0xc1 || chipset == 0xd9)
+		nv_wr32(priv, 0x418c6c, 0x00000001);
+	nv_wr32(priv, 0x418c80, 0x20200004);
+	nv_wr32(priv, 0x418c8c, 0x00000001);
+	nv_wr32(priv, 0x419000, 0x00000780);
+	nv_wr32(priv, 0x419004, 0x00000000);
+	nv_wr32(priv, 0x419008, 0x00000000);
+	nv_wr32(priv, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
+{
+	int chipset = nv_device(priv)->chipset;
+
+	/* GPC_BROADCAST.TP_BROADCAST */
+	nv_wr32(priv, 0x419818, 0x00000000);
+	nv_wr32(priv, 0x41983c, 0x00038bc7);
+	nv_wr32(priv, 0x419848, 0x00000000);
+	if (chipset == 0xc1 || chipset == 0xd9)
+		nv_wr32(priv, 0x419864, 0x00000129);
+	else
+		nv_wr32(priv, 0x419864, 0x0000012a);
+	nv_wr32(priv, 0x419888, 0x00000000);
+	nv_wr32(priv, 0x419a00, 0x000001f0);
+	nv_wr32(priv, 0x419a04, 0x00000001);
+	nv_wr32(priv, 0x419a08, 0x00000023);
+	nv_wr32(priv, 0x419a0c, 0x00020000);
+	nv_wr32(priv, 0x419a10, 0x00000000);
+	nv_wr32(priv, 0x419a14, 0x00000200);
+	nv_wr32(priv, 0x419a1c, 0x00000000);
+	nv_wr32(priv, 0x419a20, 0x00000800);
+	if (chipset == 0xd9)
+		nv_wr32(priv, 0x00419ac4, 0x0017f440);
+	else if (chipset != 0xc0 && chipset != 0xc8)
+		nv_wr32(priv, 0x00419ac4, 0x0007f440);
+	nv_wr32(priv, 0x419b00, 0x0a418820);
+	nv_wr32(priv, 0x419b04, 0x062080e6);
+	nv_wr32(priv, 0x419b08, 0x020398a4);
+	nv_wr32(priv, 0x419b0c, 0x0e629062);
+	nv_wr32(priv, 0x419b10, 0x0a418820);
+	nv_wr32(priv, 0x419b14, 0x000000e6);
+	nv_wr32(priv, 0x419bd0, 0x00900103);
+	if (chipset == 0xc1 || chipset == 0xd9)
+		nv_wr32(priv, 0x419be0, 0x00400001);
+	else
+		nv_wr32(priv, 0x419be0, 0x00000001);
+	nv_wr32(priv, 0x419be4, 0x00000000);
+	nv_wr32(priv, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
+	nv_wr32(priv, 0x419c04, 0x00000006);
+	nv_wr32(priv, 0x419c08, 0x00000002);
+	nv_wr32(priv, 0x419c20, 0x00000000);
+	if (nv_device(priv)->chipset == 0xd9) {
+		nv_wr32(priv, 0x419c24, 0x00084210);
+		nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
+		nv_wr32(priv, 0x419cb0, 0x00020048);
+	} else
+	if (chipset == 0xce || chipset == 0xcf) {
+		nv_wr32(priv, 0x419cb0, 0x00020048);
+	} else {
+		nv_wr32(priv, 0x419cb0, 0x00060048);
+	}
+	nv_wr32(priv, 0x419ce8, 0x00000000);
+	nv_wr32(priv, 0x419cf4, 0x00000183);
+	if (chipset == 0xc1 || chipset == 0xd9)
+		nv_wr32(priv, 0x419d20, 0x12180000);
+	else
+		nv_wr32(priv, 0x419d20, 0x02180000);
+	nv_wr32(priv, 0x419d24, 0x00001fff);
+	if (chipset == 0xc1 || chipset == 0xd9)
+		nv_wr32(priv, 0x419d44, 0x02180218);
+	nv_wr32(priv, 0x419e04, 0x00000000);
+	nv_wr32(priv, 0x419e08, 0x00000000);
+	nv_wr32(priv, 0x419e0c, 0x00000000);
+	nv_wr32(priv, 0x419e10, 0x00000002);
+	nv_wr32(priv, 0x419e44, 0x001beff2);
+	nv_wr32(priv, 0x419e48, 0x00000000);
+	nv_wr32(priv, 0x419e4c, 0x0000000f);
+	nv_wr32(priv, 0x419e50, 0x00000000);
+	nv_wr32(priv, 0x419e54, 0x00000000);
+	nv_wr32(priv, 0x419e58, 0x00000000);
+	nv_wr32(priv, 0x419e5c, 0x00000000);
+	nv_wr32(priv, 0x419e60, 0x00000000);
+	nv_wr32(priv, 0x419e64, 0x00000000);
+	nv_wr32(priv, 0x419e68, 0x00000000);
+	nv_wr32(priv, 0x419e6c, 0x00000000);
+	nv_wr32(priv, 0x419e70, 0x00000000);
+	nv_wr32(priv, 0x419e74, 0x00000000);
+	nv_wr32(priv, 0x419e78, 0x00000000);
+	nv_wr32(priv, 0x419e7c, 0x00000000);
+	nv_wr32(priv, 0x419e80, 0x00000000);
+	nv_wr32(priv, 0x419e84, 0x00000000);
+	nv_wr32(priv, 0x419e88, 0x00000000);
+	nv_wr32(priv, 0x419e8c, 0x00000000);
+	nv_wr32(priv, 0x419e90, 0x00000000);
+	nv_wr32(priv, 0x419e98, 0x00000000);
+	if (chipset != 0xc0 && chipset != 0xc8)
+		nv_wr32(priv, 0x419ee0, 0x00011110);
+	nv_wr32(priv, 0x419f50, 0x00000000);
+	nv_wr32(priv, 0x419f54, 0x00000000);
+	if (chipset != 0xc0 && chipset != 0xc8)
+		nv_wr32(priv, 0x419f58, 0x00000000);
+}
+
+int
+nvc0_grctx_generate(struct nvc0_graph_priv *priv)
+{
+	struct nvc0_grctx info;
+	int ret, i, gpc, tpc, id;
+	u32 fermi = nvc0_graph_class(priv);
+	u32 r000260, tmp;
+
+	ret = nvc0_grctx_init(priv, &info);
+	if (ret)
+		return ret;
+
+	r000260 = nv_rd32(priv, 0x000260);
+	nv_wr32(priv, 0x000260, r000260 & ~1);
+	nv_wr32(priv, 0x400208, 0x00000000);
+
+	nvc0_grctx_generate_dispatch(priv);
+	nvc0_grctx_generate_macro(priv);
+	nvc0_grctx_generate_m2mf(priv);
+	nvc0_grctx_generate_unk47xx(priv);
+	nvc0_grctx_generate_shaders(priv);
+	nvc0_grctx_generate_unk60xx(priv);
+	nvc0_grctx_generate_unk64xx(priv);
+	nvc0_grctx_generate_tpbus(priv);
+	nvc0_grctx_generate_ccache(priv);
+	nvc0_grctx_generate_rop(priv);
+	nvc0_grctx_generate_gpc(priv);
+	nvc0_grctx_generate_tp(priv);
+
+	nv_wr32(priv, 0x404154, 0x00000000);
+
+	/* generate per-context mmio list data */
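+	/* mmio_data()/mmio_list() do not touch the hardware here; they appear
+	 * to record buffer allocations and register/value pairs that get
+	 * replayed into each new channel's context image later on.
+	 */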
+	mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+	mmio_list(0x408004, 0x00000000,  8, 0);
+	mmio_list(0x408008, 0x80000018,  0, 0);
+	mmio_list(0x40800c, 0x00000000,  8, 1);
+	mmio_list(0x408010, 0x80000000,  0, 0);
+	mmio_list(0x418810, 0x80000000, 12, 2);
+	mmio_list(0x419848, 0x10000000, 12, 2);
+	mmio_list(0x419004, 0x00000000,  8, 1);
+	mmio_list(0x419008, 0x00000000,  0, 0);
+	mmio_list(0x418808, 0x00000000,  8, 0);
+	mmio_list(0x41880c, 0x80000018,  0, 0);
+	if (nv_device(priv)->chipset != 0xc1) {
+		tmp = 0x02180000;
+		mmio_list(0x405830, tmp, 0, 0);
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+				u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
+				mmio_list(reg, tmp, 0, 0);
+				tmp += 0x0324;
+			}
+		}
+	} else {
+		tmp = 0x02180000;
+		mmio_list(0x405830, 0x00000218 | tmp, 0, 0);
+		mmio_list(0x4064c4, 0x0086ffff, 0, 0);
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+				u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
+				mmio_list(reg, 0x10000000 | tmp, 0, 0);
+				tmp += 0x0324;
+			}
+			for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+				u32 reg = TPC_UNIT(gpc, tpc, 0x0544);
+				mmio_list(reg, tmp, 0, 0);
+				tmp += 0x0324;
+			}
+		}
+	}
+
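+	/* Hand out globally sequential IDs to every present TPC, iterating
+	 * TPC-index-major so the IDs are spread round-robin across GPCs, and
+	 * record each GPC's TPC count in its 0x0c08/0x0c8c registers.
+	 */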
+	for (tpc = 0, id = 0; tpc < 4; tpc++) {
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			if (tpc < priv->tpc_nr[gpc]) {
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
+				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+				id++;
+			}
+
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+		}
+	}
+
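+	/* Pack the per-GPC TPC counts into 4-bit fields of a single word. */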
+	tmp = 0;
+	for (i = 0; i < priv->gpc_nr; i++)
+		tmp |= priv->tpc_nr[i] << (i * 4);
+	nv_wr32(priv, 0x406028, tmp);
+	nv_wr32(priv, 0x405870, tmp);
+
+	nv_wr32(priv, 0x40602c, 0x00000000);
+	nv_wr32(priv, 0x405874, 0x00000000);
+	nv_wr32(priv, 0x406030, 0x00000000);
+	nv_wr32(priv, 0x405878, 0x00000000);
+	nv_wr32(priv, 0x406034, 0x00000000);
+	nv_wr32(priv, 0x40587c, 0x00000000);
+
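+	/* Build a byte-per-TPC table of owning GPC indices (round-robin over
+	 * GPCs that still have TPCs left, 0x1f for unused slots) and write it
+	 * out as four 32-bit words.
+	 */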
+	if (1) {
+		u8 tpcnr[GPC_MAX], data[TPC_MAX];
+
+		memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+		memset(data, 0x1f, sizeof(data));
+
+		gpc = -1;
+		for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpcnr[gpc]);
+			tpcnr[gpc]--;
+			data[tpc] = gpc;
+		}
+
+		for (i = 0; i < 4; i++)
+			nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+	}
+
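+	/* Derive the TPC->GPC mapping words the hardware expects: 5-bit GPC
+	 * indices packed six per word (unused entries forced to 7), plus a
+	 * normalised TPC count (ntpcv shifted up until bit 4 is set) and a
+	 * set of (1 << n) % ntpcv remainders whose exact meaning isn't
+	 * documented here.
+	 */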
+	if (1) {
+		u32 data[6] = {}, data2[2] = {};
+		u8 tpcnr[GPC_MAX];
+		u8 shift, ntpcv;
+
+		/* calculate first set of magics */
+		memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+		gpc = -1;
+		for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpcnr[gpc]);
+			tpcnr[gpc]--;
+
+			data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+		}
+
+		for (; tpc < 32; tpc++)
+			data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+		/* and the second... */
+		shift = 0;
+		ntpcv = priv->tpc_total;
+		while (!(ntpcv & (1 << 4))) {
+			ntpcv <<= 1;
+			shift++;
+		}
+
+		data2[0]  = (ntpcv << 16);
+		data2[0] |= (shift << 21);
+		data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+		for (i = 1; i < 7; i++)
+			data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+		/* GPC_BROADCAST */
+		nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
+					priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+
+		/* GPC_BROADCAST.TP_BROADCAST */
+		nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
+				       priv->magic_not_rop_nr |
+				       data2[0]);
+		nv_wr32(priv, 0x419be4, data2[1]);
+		for (i = 0; i < 6; i++)
+			nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
+
+		/* UNK78xx */
+		nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
+					priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+	}
+
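+	/* Spread the available TPCs as evenly as possible over 32 slots,
+	 * writing each slot's accumulated TPC enable mask to 0x406800+ and
+	 * its complement (within tpc_mask) to 0x406c00+.
+	 */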
+	if (1) {
+		u32 tpc_mask = 0, tpc_set = 0;
+		u8  tpcnr[GPC_MAX], a, b;
+
+		memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+			tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
+		for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+			a = (i * (priv->tpc_total - 1)) / 32;
+			if (a != b) {
+				b = a;
+				do {
+					gpc = (gpc + 1) % priv->gpc_nr;
+				} while (!tpcnr[gpc]);
+				tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+				tpc_set |= 1 << ((gpc * 8) + tpc);
+			}
+
+			nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
+			nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+		}
+	}
+
+	nv_wr32(priv, 0x400208, 0x80000000);
+
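+	/* The long nv_icmd() sequence below presumably seeds default method
+	 * state for the graphics classes into the context image; 0x400208 is
+	 * raised to 0x80000000 beforehand and dropped again afterwards.
+	 */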
+	nv_icmd(priv, 0x00001000, 0x00000004);
+	nv_icmd(priv, 0x000000a9, 0x0000ffff);
+	nv_icmd(priv, 0x00000038, 0x0fac6881);
+	nv_icmd(priv, 0x0000003d, 0x00000001);
+	nv_icmd(priv, 0x000000e8, 0x00000400);
+	nv_icmd(priv, 0x000000e9, 0x00000400);
+	nv_icmd(priv, 0x000000ea, 0x00000400);
+	nv_icmd(priv, 0x000000eb, 0x00000400);
+	nv_icmd(priv, 0x000000ec, 0x00000400);
+	nv_icmd(priv, 0x000000ed, 0x00000400);
+	nv_icmd(priv, 0x000000ee, 0x00000400);
+	nv_icmd(priv, 0x000000ef, 0x00000400);
+	nv_icmd(priv, 0x00000078, 0x00000300);
+	nv_icmd(priv, 0x00000079, 0x00000300);
+	nv_icmd(priv, 0x0000007a, 0x00000300);
+	nv_icmd(priv, 0x0000007b, 0x00000300);
+	nv_icmd(priv, 0x0000007c, 0x00000300);
+	nv_icmd(priv, 0x0000007d, 0x00000300);
+	nv_icmd(priv, 0x0000007e, 0x00000300);
+	nv_icmd(priv, 0x0000007f, 0x00000300);
+	nv_icmd(priv, 0x00000050, 0x00000011);
+	nv_icmd(priv, 0x00000058, 0x00000008);
+	nv_icmd(priv, 0x00000059, 0x00000008);
+	nv_icmd(priv, 0x0000005a, 0x00000008);
+	nv_icmd(priv, 0x0000005b, 0x00000008);
+	nv_icmd(priv, 0x0000005c, 0x00000008);
+	nv_icmd(priv, 0x0000005d, 0x00000008);
+	nv_icmd(priv, 0x0000005e, 0x00000008);
+	nv_icmd(priv, 0x0000005f, 0x00000008);
+	nv_icmd(priv, 0x00000208, 0x00000001);
+	nv_icmd(priv, 0x00000209, 0x00000001);
+	nv_icmd(priv, 0x0000020a, 0x00000001);
+	nv_icmd(priv, 0x0000020b, 0x00000001);
+	nv_icmd(priv, 0x0000020c, 0x00000001);
+	nv_icmd(priv, 0x0000020d, 0x00000001);
+	nv_icmd(priv, 0x0000020e, 0x00000001);
+	nv_icmd(priv, 0x0000020f, 0x00000001);
+	nv_icmd(priv, 0x00000081, 0x00000001);
+	nv_icmd(priv, 0x00000085, 0x00000004);
+	nv_icmd(priv, 0x00000088, 0x00000400);
+	nv_icmd(priv, 0x00000090, 0x00000300);
+	nv_icmd(priv, 0x00000098, 0x00001001);
+	nv_icmd(priv, 0x000000e3, 0x00000001);
+	nv_icmd(priv, 0x000000da, 0x00000001);
+	nv_icmd(priv, 0x000000f8, 0x00000003);
+	nv_icmd(priv, 0x000000fa, 0x00000001);
+	nv_icmd(priv, 0x0000009f, 0x0000ffff);
+	nv_icmd(priv, 0x000000a0, 0x0000ffff);
+	nv_icmd(priv, 0x000000a1, 0x0000ffff);
+	nv_icmd(priv, 0x000000a2, 0x0000ffff);
+	nv_icmd(priv, 0x000000b1, 0x00000001);
+	nv_icmd(priv, 0x000000b2, 0x00000000);
+	nv_icmd(priv, 0x000000b3, 0x00000000);
+	nv_icmd(priv, 0x000000b4, 0x00000000);
+	nv_icmd(priv, 0x000000b5, 0x00000000);
+	nv_icmd(priv, 0x000000b6, 0x00000000);
+	nv_icmd(priv, 0x000000b7, 0x00000000);
+	nv_icmd(priv, 0x000000b8, 0x00000000);
+	nv_icmd(priv, 0x000000b9, 0x00000000);
+	nv_icmd(priv, 0x000000ba, 0x00000000);
+	nv_icmd(priv, 0x000000bb, 0x00000000);
+	nv_icmd(priv, 0x000000bc, 0x00000000);
+	nv_icmd(priv, 0x000000bd, 0x00000000);
+	nv_icmd(priv, 0x000000be, 0x00000000);
+	nv_icmd(priv, 0x000000bf, 0x00000000);
+	nv_icmd(priv, 0x000000c0, 0x00000000);
+	nv_icmd(priv, 0x000000c1, 0x00000000);
+	nv_icmd(priv, 0x000000c2, 0x00000000);
+	nv_icmd(priv, 0x000000c3, 0x00000000);
+	nv_icmd(priv, 0x000000c4, 0x00000000);
+	nv_icmd(priv, 0x000000c5, 0x00000000);
+	nv_icmd(priv, 0x000000c6, 0x00000000);
+	nv_icmd(priv, 0x000000c7, 0x00000000);
+	nv_icmd(priv, 0x000000c8, 0x00000000);
+	nv_icmd(priv, 0x000000c9, 0x00000000);
+	nv_icmd(priv, 0x000000ca, 0x00000000);
+	nv_icmd(priv, 0x000000cb, 0x00000000);
+	nv_icmd(priv, 0x000000cc, 0x00000000);
+	nv_icmd(priv, 0x000000cd, 0x00000000);
+	nv_icmd(priv, 0x000000ce, 0x00000000);
+	nv_icmd(priv, 0x000000cf, 0x00000000);
+	nv_icmd(priv, 0x000000d0, 0x00000000);
+	nv_icmd(priv, 0x000000d1, 0x00000000);
+	nv_icmd(priv, 0x000000d2, 0x00000000);
+	nv_icmd(priv, 0x000000d3, 0x00000000);
+	nv_icmd(priv, 0x000000d4, 0x00000000);
+	nv_icmd(priv, 0x000000d5, 0x00000000);
+	nv_icmd(priv, 0x000000d6, 0x00000000);
+	nv_icmd(priv, 0x000000d7, 0x00000000);
+	nv_icmd(priv, 0x000000d8, 0x00000000);
+	nv_icmd(priv, 0x000000d9, 0x00000000);
+	nv_icmd(priv, 0x00000210, 0x00000040);
+	nv_icmd(priv, 0x00000211, 0x00000040);
+	nv_icmd(priv, 0x00000212, 0x00000040);
+	nv_icmd(priv, 0x00000213, 0x00000040);
+	nv_icmd(priv, 0x00000214, 0x00000040);
+	nv_icmd(priv, 0x00000215, 0x00000040);
+	nv_icmd(priv, 0x00000216, 0x00000040);
+	nv_icmd(priv, 0x00000217, 0x00000040);
+	if (nv_device(priv)->chipset == 0xd9) {
+		for (i = 0x0400; i <= 0x0417; i++)
+			nv_icmd(priv, i, 0x00000040);
+	}
+	nv_icmd(priv, 0x00000218, 0x0000c080);
+	nv_icmd(priv, 0x00000219, 0x0000c080);
+	nv_icmd(priv, 0x0000021a, 0x0000c080);
+	nv_icmd(priv, 0x0000021b, 0x0000c080);
+	nv_icmd(priv, 0x0000021c, 0x0000c080);
+	nv_icmd(priv, 0x0000021d, 0x0000c080);
+	nv_icmd(priv, 0x0000021e, 0x0000c080);
+	nv_icmd(priv, 0x0000021f, 0x0000c080);
+	if (nv_device(priv)->chipset == 0xd9) {
+		for (i = 0x0440; i <= 0x0457; i++)
+			nv_icmd(priv, i, 0x0000c080);
+	}
+	nv_icmd(priv, 0x000000ad, 0x0000013e);
+	nv_icmd(priv, 0x000000e1, 0x00000010);
+	nv_icmd(priv, 0x00000290, 0x00000000);
+	nv_icmd(priv, 0x00000291, 0x00000000);
+	nv_icmd(priv, 0x00000292, 0x00000000);
+	nv_icmd(priv, 0x00000293, 0x00000000);
+	nv_icmd(priv, 0x00000294, 0x00000000);
+	nv_icmd(priv, 0x00000295, 0x00000000);
+	nv_icmd(priv, 0x00000296, 0x00000000);
+	nv_icmd(priv, 0x00000297, 0x00000000);
+	nv_icmd(priv, 0x00000298, 0x00000000);
+	nv_icmd(priv, 0x00000299, 0x00000000);
+	nv_icmd(priv, 0x0000029a, 0x00000000);
+	nv_icmd(priv, 0x0000029b, 0x00000000);
+	nv_icmd(priv, 0x0000029c, 0x00000000);
+	nv_icmd(priv, 0x0000029d, 0x00000000);
+	nv_icmd(priv, 0x0000029e, 0x00000000);
+	nv_icmd(priv, 0x0000029f, 0x00000000);
+	nv_icmd(priv, 0x000003b0, 0x00000000);
+	nv_icmd(priv, 0x000003b1, 0x00000000);
+	nv_icmd(priv, 0x000003b2, 0x00000000);
+	nv_icmd(priv, 0x000003b3, 0x00000000);
+	nv_icmd(priv, 0x000003b4, 0x00000000);
+	nv_icmd(priv, 0x000003b5, 0x00000000);
+	nv_icmd(priv, 0x000003b6, 0x00000000);
+	nv_icmd(priv, 0x000003b7, 0x00000000);
+	nv_icmd(priv, 0x000003b8, 0x00000000);
+	nv_icmd(priv, 0x000003b9, 0x00000000);
+	nv_icmd(priv, 0x000003ba, 0x00000000);
+	nv_icmd(priv, 0x000003bb, 0x00000000);
+	nv_icmd(priv, 0x000003bc, 0x00000000);
+	nv_icmd(priv, 0x000003bd, 0x00000000);
+	nv_icmd(priv, 0x000003be, 0x00000000);
+	nv_icmd(priv, 0x000003bf, 0x00000000);
+	nv_icmd(priv, 0x000002a0, 0x00000000);
+	nv_icmd(priv, 0x000002a1, 0x00000000);
+	nv_icmd(priv, 0x000002a2, 0x00000000);
+	nv_icmd(priv, 0x000002a3, 0x00000000);
+	nv_icmd(priv, 0x000002a4, 0x00000000);
+	nv_icmd(priv, 0x000002a5, 0x00000000);
+	nv_icmd(priv, 0x000002a6, 0x00000000);
+	nv_icmd(priv, 0x000002a7, 0x00000000);
+	nv_icmd(priv, 0x000002a8, 0x00000000);
+	nv_icmd(priv, 0x000002a9, 0x00000000);
+	nv_icmd(priv, 0x000002aa, 0x00000000);
+	nv_icmd(priv, 0x000002ab, 0x00000000);
+	nv_icmd(priv, 0x000002ac, 0x00000000);
+	nv_icmd(priv, 0x000002ad, 0x00000000);
+	nv_icmd(priv, 0x000002ae, 0x00000000);
+	nv_icmd(priv, 0x000002af, 0x00000000);
+	nv_icmd(priv, 0x00000420, 0x00000000);
+	nv_icmd(priv, 0x00000421, 0x00000000);
+	nv_icmd(priv, 0x00000422, 0x00000000);
+	nv_icmd(priv, 0x00000423, 0x00000000);
+	nv_icmd(priv, 0x00000424, 0x00000000);
+	nv_icmd(priv, 0x00000425, 0x00000000);
+	nv_icmd(priv, 0x00000426, 0x00000000);
+	nv_icmd(priv, 0x00000427, 0x00000000);
+	nv_icmd(priv, 0x00000428, 0x00000000);
+	nv_icmd(priv, 0x00000429, 0x00000000);
+	nv_icmd(priv, 0x0000042a, 0x00000000);
+	nv_icmd(priv, 0x0000042b, 0x00000000);
+	nv_icmd(priv, 0x0000042c, 0x00000000);
+	nv_icmd(priv, 0x0000042d, 0x00000000);
+	nv_icmd(priv, 0x0000042e, 0x00000000);
+	nv_icmd(priv, 0x0000042f, 0x00000000);
+	nv_icmd(priv, 0x000002b0, 0x00000000);
+	nv_icmd(priv, 0x000002b1, 0x00000000);
+	nv_icmd(priv, 0x000002b2, 0x00000000);
+	nv_icmd(priv, 0x000002b3, 0x00000000);
+	nv_icmd(priv, 0x000002b4, 0x00000000);
+	nv_icmd(priv, 0x000002b5, 0x00000000);
+	nv_icmd(priv, 0x000002b6, 0x00000000);
+	nv_icmd(priv, 0x000002b7, 0x00000000);
+	nv_icmd(priv, 0x000002b8, 0x00000000);
+	nv_icmd(priv, 0x000002b9, 0x00000000);
+	nv_icmd(priv, 0x000002ba, 0x00000000);
+	nv_icmd(priv, 0x000002bb, 0x00000000);
+	nv_icmd(priv, 0x000002bc, 0x00000000);
+	nv_icmd(priv, 0x000002bd, 0x00000000);
+	nv_icmd(priv, 0x000002be, 0x00000000);
+	nv_icmd(priv, 0x000002bf, 0x00000000);
+	nv_icmd(priv, 0x00000430, 0x00000000);
+	nv_icmd(priv, 0x00000431, 0x00000000);
+	nv_icmd(priv, 0x00000432, 0x00000000);
+	nv_icmd(priv, 0x00000433, 0x00000000);
+	nv_icmd(priv, 0x00000434, 0x00000000);
+	nv_icmd(priv, 0x00000435, 0x00000000);
+	nv_icmd(priv, 0x00000436, 0x00000000);
+	nv_icmd(priv, 0x00000437, 0x00000000);
+	nv_icmd(priv, 0x00000438, 0x00000000);
+	nv_icmd(priv, 0x00000439, 0x00000000);
+	nv_icmd(priv, 0x0000043a, 0x00000000);
+	nv_icmd(priv, 0x0000043b, 0x00000000);
+	nv_icmd(priv, 0x0000043c, 0x00000000);
+	nv_icmd(priv, 0x0000043d, 0x00000000);
+	nv_icmd(priv, 0x0000043e, 0x00000000);
+	nv_icmd(priv, 0x0000043f, 0x00000000);
+	nv_icmd(priv, 0x000002c0, 0x00000000);
+	nv_icmd(priv, 0x000002c1, 0x00000000);
+	nv_icmd(priv, 0x000002c2, 0x00000000);
+	nv_icmd(priv, 0x000002c3, 0x00000000);
+	nv_icmd(priv, 0x000002c4, 0x00000000);
+	nv_icmd(priv, 0x000002c5, 0x00000000);
+	nv_icmd(priv, 0x000002c6, 0x00000000);
+	nv_icmd(priv, 0x000002c7, 0x00000000);
+	nv_icmd(priv, 0x000002c8, 0x00000000);
+	nv_icmd(priv, 0x000002c9, 0x00000000);
+	nv_icmd(priv, 0x000002ca, 0x00000000);
+	nv_icmd(priv, 0x000002cb, 0x00000000);
+	nv_icmd(priv, 0x000002cc, 0x00000000);
+	nv_icmd(priv, 0x000002cd, 0x00000000);
+	nv_icmd(priv, 0x000002ce, 0x00000000);
+	nv_icmd(priv, 0x000002cf, 0x00000000);
+	nv_icmd(priv, 0x000004d0, 0x00000000);
+	nv_icmd(priv, 0x000004d1, 0x00000000);
+	nv_icmd(priv, 0x000004d2, 0x00000000);
+	nv_icmd(priv, 0x000004d3, 0x00000000);
+	nv_icmd(priv, 0x000004d4, 0x00000000);
+	nv_icmd(priv, 0x000004d5, 0x00000000);
+	nv_icmd(priv, 0x000004d6, 0x00000000);
+	nv_icmd(priv, 0x000004d7, 0x00000000);
+	nv_icmd(priv, 0x000004d8, 0x00000000);
+	nv_icmd(priv, 0x000004d9, 0x00000000);
+	nv_icmd(priv, 0x000004da, 0x00000000);
+	nv_icmd(priv, 0x000004db, 0x00000000);
+	nv_icmd(priv, 0x000004dc, 0x00000000);
+	nv_icmd(priv, 0x000004dd, 0x00000000);
+	nv_icmd(priv, 0x000004de, 0x00000000);
+	nv_icmd(priv, 0x000004df, 0x00000000);
+	nv_icmd(priv, 0x00000720, 0x00000000);
+	nv_icmd(priv, 0x00000721, 0x00000000);
+	nv_icmd(priv, 0x00000722, 0x00000000);
+	nv_icmd(priv, 0x00000723, 0x00000000);
+	nv_icmd(priv, 0x00000724, 0x00000000);
+	nv_icmd(priv, 0x00000725, 0x00000000);
+	nv_icmd(priv, 0x00000726, 0x00000000);
+	nv_icmd(priv, 0x00000727, 0x00000000);
+	nv_icmd(priv, 0x00000728, 0x00000000);
+	nv_icmd(priv, 0x00000729, 0x00000000);
+	nv_icmd(priv, 0x0000072a, 0x00000000);
+	nv_icmd(priv, 0x0000072b, 0x00000000);
+	nv_icmd(priv, 0x0000072c, 0x00000000);
+	nv_icmd(priv, 0x0000072d, 0x00000000);
+	nv_icmd(priv, 0x0000072e, 0x00000000);
+	nv_icmd(priv, 0x0000072f, 0x00000000);
+	nv_icmd(priv, 0x000008c0, 0x00000000);
+	nv_icmd(priv, 0x000008c1, 0x00000000);
+	nv_icmd(priv, 0x000008c2, 0x00000000);
+	nv_icmd(priv, 0x000008c3, 0x00000000);
+	nv_icmd(priv, 0x000008c4, 0x00000000);
+	nv_icmd(priv, 0x000008c5, 0x00000000);
+	nv_icmd(priv, 0x000008c6, 0x00000000);
+	nv_icmd(priv, 0x000008c7, 0x00000000);
+	nv_icmd(priv, 0x000008c8, 0x00000000);
+	nv_icmd(priv, 0x000008c9, 0x00000000);
+	nv_icmd(priv, 0x000008ca, 0x00000000);
+	nv_icmd(priv, 0x000008cb, 0x00000000);
+	nv_icmd(priv, 0x000008cc, 0x00000000);
+	nv_icmd(priv, 0x000008cd, 0x00000000);
+	nv_icmd(priv, 0x000008ce, 0x00000000);
+	nv_icmd(priv, 0x000008cf, 0x00000000);
+	nv_icmd(priv, 0x00000890, 0x00000000);
+	nv_icmd(priv, 0x00000891, 0x00000000);
+	nv_icmd(priv, 0x00000892, 0x00000000);
+	nv_icmd(priv, 0x00000893, 0x00000000);
+	nv_icmd(priv, 0x00000894, 0x00000000);
+	nv_icmd(priv, 0x00000895, 0x00000000);
+	nv_icmd(priv, 0x00000896, 0x00000000);
+	nv_icmd(priv, 0x00000897, 0x00000000);
+	nv_icmd(priv, 0x00000898, 0x00000000);
+	nv_icmd(priv, 0x00000899, 0x00000000);
+	nv_icmd(priv, 0x0000089a, 0x00000000);
+	nv_icmd(priv, 0x0000089b, 0x00000000);
+	nv_icmd(priv, 0x0000089c, 0x00000000);
+	nv_icmd(priv, 0x0000089d, 0x00000000);
+	nv_icmd(priv, 0x0000089e, 0x00000000);
+	nv_icmd(priv, 0x0000089f, 0x00000000);
+	nv_icmd(priv, 0x000008e0, 0x00000000);
+	nv_icmd(priv, 0x000008e1, 0x00000000);
+	nv_icmd(priv, 0x000008e2, 0x00000000);
+	nv_icmd(priv, 0x000008e3, 0x00000000);
+	nv_icmd(priv, 0x000008e4, 0x00000000);
+	nv_icmd(priv, 0x000008e5, 0x00000000);
+	nv_icmd(priv, 0x000008e6, 0x00000000);
+	nv_icmd(priv, 0x000008e7, 0x00000000);
+	nv_icmd(priv, 0x000008e8, 0x00000000);
+	nv_icmd(priv, 0x000008e9, 0x00000000);
+	nv_icmd(priv, 0x000008ea, 0x00000000);
+	nv_icmd(priv, 0x000008eb, 0x00000000);
+	nv_icmd(priv, 0x000008ec, 0x00000000);
+	nv_icmd(priv, 0x000008ed, 0x00000000);
+	nv_icmd(priv, 0x000008ee, 0x00000000);
+	nv_icmd(priv, 0x000008ef, 0x00000000);
+	nv_icmd(priv, 0x000008a0, 0x00000000);
+	nv_icmd(priv, 0x000008a1, 0x00000000);
+	nv_icmd(priv, 0x000008a2, 0x00000000);
+	nv_icmd(priv, 0x000008a3, 0x00000000);
+	nv_icmd(priv, 0x000008a4, 0x00000000);
+	nv_icmd(priv, 0x000008a5, 0x00000000);
+	nv_icmd(priv, 0x000008a6, 0x00000000);
+	nv_icmd(priv, 0x000008a7, 0x00000000);
+	nv_icmd(priv, 0x000008a8, 0x00000000);
+	nv_icmd(priv, 0x000008a9, 0x00000000);
+	nv_icmd(priv, 0x000008aa, 0x00000000);
+	nv_icmd(priv, 0x000008ab, 0x00000000);
+	nv_icmd(priv, 0x000008ac, 0x00000000);
+	nv_icmd(priv, 0x000008ad, 0x00000000);
+	nv_icmd(priv, 0x000008ae, 0x00000000);
+	nv_icmd(priv, 0x000008af, 0x00000000);
+	nv_icmd(priv, 0x000008f0, 0x00000000);
+	nv_icmd(priv, 0x000008f1, 0x00000000);
+	nv_icmd(priv, 0x000008f2, 0x00000000);
+	nv_icmd(priv, 0x000008f3, 0x00000000);
+	nv_icmd(priv, 0x000008f4, 0x00000000);
+	nv_icmd(priv, 0x000008f5, 0x00000000);
+	nv_icmd(priv, 0x000008f6, 0x00000000);
+	nv_icmd(priv, 0x000008f7, 0x00000000);
+	nv_icmd(priv, 0x000008f8, 0x00000000);
+	nv_icmd(priv, 0x000008f9, 0x00000000);
+	nv_icmd(priv, 0x000008fa, 0x00000000);
+	nv_icmd(priv, 0x000008fb, 0x00000000);
+	nv_icmd(priv, 0x000008fc, 0x00000000);
+	nv_icmd(priv, 0x000008fd, 0x00000000);
+	nv_icmd(priv, 0x000008fe, 0x00000000);
+	nv_icmd(priv, 0x000008ff, 0x00000000);
+	nv_icmd(priv, 0x0000094c, 0x000000ff);
+	nv_icmd(priv, 0x0000094d, 0xffffffff);
+	nv_icmd(priv, 0x0000094e, 0x00000002);
+	nv_icmd(priv, 0x000002ec, 0x00000001);
+	nv_icmd(priv, 0x00000303, 0x00000001);
+	nv_icmd(priv, 0x000002e6, 0x00000001);
+	nv_icmd(priv, 0x00000466, 0x00000052);
+	nv_icmd(priv, 0x00000301, 0x3f800000);
+	nv_icmd(priv, 0x00000304, 0x30201000);
+	nv_icmd(priv, 0x00000305, 0x70605040);
+	nv_icmd(priv, 0x00000306, 0xb8a89888);
+	nv_icmd(priv, 0x00000307, 0xf8e8d8c8);
+	nv_icmd(priv, 0x0000030a, 0x00ffff00);
+	nv_icmd(priv, 0x0000030b, 0x0000001a);
+	nv_icmd(priv, 0x0000030c, 0x00000001);
+	nv_icmd(priv, 0x00000318, 0x00000001);
+	nv_icmd(priv, 0x00000340, 0x00000000);
+	nv_icmd(priv, 0x00000375, 0x00000001);
+	nv_icmd(priv, 0x00000351, 0x00000100);
+	nv_icmd(priv, 0x0000037d, 0x00000006);
+	nv_icmd(priv, 0x000003a0, 0x00000002);
+	nv_icmd(priv, 0x000003aa, 0x00000001);
+	nv_icmd(priv, 0x000003a9, 0x00000001);
+	nv_icmd(priv, 0x00000380, 0x00000001);
+	nv_icmd(priv, 0x00000360, 0x00000040);
+	nv_icmd(priv, 0x00000366, 0x00000000);
+	nv_icmd(priv, 0x00000367, 0x00000000);
+	nv_icmd(priv, 0x00000368, 0x00001fff);
+	nv_icmd(priv, 0x00000370, 0x00000000);
+	nv_icmd(priv, 0x00000371, 0x00000000);
+	nv_icmd(priv, 0x00000372, 0x003fffff);
+	nv_icmd(priv, 0x0000037a, 0x00000012);
+	nv_icmd(priv, 0x000005e0, 0x00000022);
+	nv_icmd(priv, 0x000005e1, 0x00000022);
+	nv_icmd(priv, 0x000005e2, 0x00000022);
+	nv_icmd(priv, 0x000005e3, 0x00000022);
+	nv_icmd(priv, 0x000005e4, 0x00000022);
+	nv_icmd(priv, 0x00000619, 0x00000003);
+	nv_icmd(priv, 0x00000811, 0x00000003);
+	nv_icmd(priv, 0x00000812, 0x00000004);
+	nv_icmd(priv, 0x00000813, 0x00000006);
+	nv_icmd(priv, 0x00000814, 0x00000008);
+	nv_icmd(priv, 0x00000815, 0x0000000b);
+	nv_icmd(priv, 0x00000800, 0x00000001);
+	nv_icmd(priv, 0x00000801, 0x00000001);
+	nv_icmd(priv, 0x00000802, 0x00000001);
+	nv_icmd(priv, 0x00000803, 0x00000001);
+	nv_icmd(priv, 0x00000804, 0x00000001);
+	nv_icmd(priv, 0x00000805, 0x00000001);
+	nv_icmd(priv, 0x00000632, 0x00000001);
+	nv_icmd(priv, 0x00000633, 0x00000002);
+	nv_icmd(priv, 0x00000634, 0x00000003);
+	nv_icmd(priv, 0x00000635, 0x00000004);
+	nv_icmd(priv, 0x00000654, 0x3f800000);
+	nv_icmd(priv, 0x00000657, 0x3f800000);
+	nv_icmd(priv, 0x00000655, 0x3f800000);
+	nv_icmd(priv, 0x00000656, 0x3f800000);
+	nv_icmd(priv, 0x000006cd, 0x3f800000);
+	nv_icmd(priv, 0x000007f5, 0x3f800000);
+	nv_icmd(priv, 0x000007dc, 0x39291909);
+	nv_icmd(priv, 0x000007dd, 0x79695949);
+	nv_icmd(priv, 0x000007de, 0xb9a99989);
+	nv_icmd(priv, 0x000007df, 0xf9e9d9c9);
+	nv_icmd(priv, 0x000007e8, 0x00003210);
+	nv_icmd(priv, 0x000007e9, 0x00007654);
+	nv_icmd(priv, 0x000007ea, 0x00000098);
+	nv_icmd(priv, 0x000007ec, 0x39291909);
+	nv_icmd(priv, 0x000007ed, 0x79695949);
+	nv_icmd(priv, 0x000007ee, 0xb9a99989);
+	nv_icmd(priv, 0x000007ef, 0xf9e9d9c9);
+	nv_icmd(priv, 0x000007f0, 0x00003210);
+	nv_icmd(priv, 0x000007f1, 0x00007654);
+	nv_icmd(priv, 0x000007f2, 0x00000098);
+	nv_icmd(priv, 0x000005a5, 0x00000001);
+	nv_icmd(priv, 0x00000980, 0x00000000);
+	nv_icmd(priv, 0x00000981, 0x00000000);
+	nv_icmd(priv, 0x00000982, 0x00000000);
+	nv_icmd(priv, 0x00000983, 0x00000000);
+	nv_icmd(priv, 0x00000984, 0x00000000);
+	nv_icmd(priv, 0x00000985, 0x00000000);
+	nv_icmd(priv, 0x00000986, 0x00000000);
+	nv_icmd(priv, 0x00000987, 0x00000000);
+	nv_icmd(priv, 0x00000988, 0x00000000);
+	nv_icmd(priv, 0x00000989, 0x00000000);
+	nv_icmd(priv, 0x0000098a, 0x00000000);
+	nv_icmd(priv, 0x0000098b, 0x00000000);
+	nv_icmd(priv, 0x0000098c, 0x00000000);
+	nv_icmd(priv, 0x0000098d, 0x00000000);
+	nv_icmd(priv, 0x0000098e, 0x00000000);
+	nv_icmd(priv, 0x0000098f, 0x00000000);
+	nv_icmd(priv, 0x00000990, 0x00000000);
+	nv_icmd(priv, 0x00000991, 0x00000000);
+	nv_icmd(priv, 0x00000992, 0x00000000);
+	nv_icmd(priv, 0x00000993, 0x00000000);
+	nv_icmd(priv, 0x00000994, 0x00000000);
+	nv_icmd(priv, 0x00000995, 0x00000000);
+	nv_icmd(priv, 0x00000996, 0x00000000);
+	nv_icmd(priv, 0x00000997, 0x00000000);
+	nv_icmd(priv, 0x00000998, 0x00000000);
+	nv_icmd(priv, 0x00000999, 0x00000000);
+	nv_icmd(priv, 0x0000099a, 0x00000000);
+	nv_icmd(priv, 0x0000099b, 0x00000000);
+	nv_icmd(priv, 0x0000099c, 0x00000000);
+	nv_icmd(priv, 0x0000099d, 0x00000000);
+	nv_icmd(priv, 0x0000099e, 0x00000000);
+	nv_icmd(priv, 0x0000099f, 0x00000000);
+	nv_icmd(priv, 0x000009a0, 0x00000000);
+	nv_icmd(priv, 0x000009a1, 0x00000000);
+	nv_icmd(priv, 0x000009a2, 0x00000000);
+	nv_icmd(priv, 0x000009a3, 0x00000000);
+	nv_icmd(priv, 0x000009a4, 0x00000000);
+	nv_icmd(priv, 0x000009a5, 0x00000000);
+	nv_icmd(priv, 0x000009a6, 0x00000000);
+	nv_icmd(priv, 0x000009a7, 0x00000000);
+	nv_icmd(priv, 0x000009a8, 0x00000000);
+	nv_icmd(priv, 0x000009a9, 0x00000000);
+	nv_icmd(priv, 0x000009aa, 0x00000000);
+	nv_icmd(priv, 0x000009ab, 0x00000000);
+	nv_icmd(priv, 0x000009ac, 0x00000000);
+	nv_icmd(priv, 0x000009ad, 0x00000000);
+	nv_icmd(priv, 0x000009ae, 0x00000000);
+	nv_icmd(priv, 0x000009af, 0x00000000);
+	nv_icmd(priv, 0x000009b0, 0x00000000);
+	nv_icmd(priv, 0x000009b1, 0x00000000);
+	nv_icmd(priv, 0x000009b2, 0x00000000);
+	nv_icmd(priv, 0x000009b3, 0x00000000);
+	nv_icmd(priv, 0x000009b4, 0x00000000);
+	nv_icmd(priv, 0x000009b5, 0x00000000);
+	nv_icmd(priv, 0x000009b6, 0x00000000);
+	nv_icmd(priv, 0x000009b7, 0x00000000);
+	nv_icmd(priv, 0x000009b8, 0x00000000);
+	nv_icmd(priv, 0x000009b9, 0x00000000);
+	nv_icmd(priv, 0x000009ba, 0x00000000);
+	nv_icmd(priv, 0x000009bb, 0x00000000);
+	nv_icmd(priv, 0x000009bc, 0x00000000);
+	nv_icmd(priv, 0x000009bd, 0x00000000);
+	nv_icmd(priv, 0x000009be, 0x00000000);
+	nv_icmd(priv, 0x000009bf, 0x00000000);
+	nv_icmd(priv, 0x000009c0, 0x00000000);
+	nv_icmd(priv, 0x000009c1, 0x00000000);
+	nv_icmd(priv, 0x000009c2, 0x00000000);
+	nv_icmd(priv, 0x000009c3, 0x00000000);
+	nv_icmd(priv, 0x000009c4, 0x00000000);
+	nv_icmd(priv, 0x000009c5, 0x00000000);
+	nv_icmd(priv, 0x000009c6, 0x00000000);
+	nv_icmd(priv, 0x000009c7, 0x00000000);
+	nv_icmd(priv, 0x000009c8, 0x00000000);
+	nv_icmd(priv, 0x000009c9, 0x00000000);
+	nv_icmd(priv, 0x000009ca, 0x00000000);
+	nv_icmd(priv, 0x000009cb, 0x00000000);
+	nv_icmd(priv, 0x000009cc, 0x00000000);
+	nv_icmd(priv, 0x000009cd, 0x00000000);
+	nv_icmd(priv, 0x000009ce, 0x00000000);
+	nv_icmd(priv, 0x000009cf, 0x00000000);
+	nv_icmd(priv, 0x000009d0, 0x00000000);
+	nv_icmd(priv, 0x000009d1, 0x00000000);
+	nv_icmd(priv, 0x000009d2, 0x00000000);
+	nv_icmd(priv, 0x000009d3, 0x00000000);
+	nv_icmd(priv, 0x000009d4, 0x00000000);
+	nv_icmd(priv, 0x000009d5, 0x00000000);
+	nv_icmd(priv, 0x000009d6, 0x00000000);
+	nv_icmd(priv, 0x000009d7, 0x00000000);
+	nv_icmd(priv, 0x000009d8, 0x00000000);
+	nv_icmd(priv, 0x000009d9, 0x00000000);
+	nv_icmd(priv, 0x000009da, 0x00000000);
+	nv_icmd(priv, 0x000009db, 0x00000000);
+	nv_icmd(priv, 0x000009dc, 0x00000000);
+	nv_icmd(priv, 0x000009dd, 0x00000000);
+	nv_icmd(priv, 0x000009de, 0x00000000);
+	nv_icmd(priv, 0x000009df, 0x00000000);
+	nv_icmd(priv, 0x000009e0, 0x00000000);
+	nv_icmd(priv, 0x000009e1, 0x00000000);
+	nv_icmd(priv, 0x000009e2, 0x00000000);
+	nv_icmd(priv, 0x000009e3, 0x00000000);
+	nv_icmd(priv, 0x000009e4, 0x00000000);
+	nv_icmd(priv, 0x000009e5, 0x00000000);
+	nv_icmd(priv, 0x000009e6, 0x00000000);
+	nv_icmd(priv, 0x000009e7, 0x00000000);
+	nv_icmd(priv, 0x000009e8, 0x00000000);
+	nv_icmd(priv, 0x000009e9, 0x00000000);
+	nv_icmd(priv, 0x000009ea, 0x00000000);
+	nv_icmd(priv, 0x000009eb, 0x00000000);
+	nv_icmd(priv, 0x000009ec, 0x00000000);
+	nv_icmd(priv, 0x000009ed, 0x00000000);
+	nv_icmd(priv, 0x000009ee, 0x00000000);
+	nv_icmd(priv, 0x000009ef, 0x00000000);
+	nv_icmd(priv, 0x000009f0, 0x00000000);
+	nv_icmd(priv, 0x000009f1, 0x00000000);
+	nv_icmd(priv, 0x000009f2, 0x00000000);
+	nv_icmd(priv, 0x000009f3, 0x00000000);
+	nv_icmd(priv, 0x000009f4, 0x00000000);
+	nv_icmd(priv, 0x000009f5, 0x00000000);
+	nv_icmd(priv, 0x000009f6, 0x00000000);
+	nv_icmd(priv, 0x000009f7, 0x00000000);
+	nv_icmd(priv, 0x000009f8, 0x00000000);
+	nv_icmd(priv, 0x000009f9, 0x00000000);
+	nv_icmd(priv, 0x000009fa, 0x00000000);
+	nv_icmd(priv, 0x000009fb, 0x00000000);
+	nv_icmd(priv, 0x000009fc, 0x00000000);
+	nv_icmd(priv, 0x000009fd, 0x00000000);
+	nv_icmd(priv, 0x000009fe, 0x00000000);
+	nv_icmd(priv, 0x000009ff, 0x00000000);
+	nv_icmd(priv, 0x00000468, 0x00000004);
+	nv_icmd(priv, 0x0000046c, 0x00000001);
+	nv_icmd(priv, 0x00000470, 0x00000000);
+	nv_icmd(priv, 0x00000471, 0x00000000);
+	nv_icmd(priv, 0x00000472, 0x00000000);
+	nv_icmd(priv, 0x00000473, 0x00000000);
+	nv_icmd(priv, 0x00000474, 0x00000000);
+	nv_icmd(priv, 0x00000475, 0x00000000);
+	nv_icmd(priv, 0x00000476, 0x00000000);
+	nv_icmd(priv, 0x00000477, 0x00000000);
+	nv_icmd(priv, 0x00000478, 0x00000000);
+	nv_icmd(priv, 0x00000479, 0x00000000);
+	nv_icmd(priv, 0x0000047a, 0x00000000);
+	nv_icmd(priv, 0x0000047b, 0x00000000);
+	nv_icmd(priv, 0x0000047c, 0x00000000);
+	nv_icmd(priv, 0x0000047d, 0x00000000);
+	nv_icmd(priv, 0x0000047e, 0x00000000);
+	nv_icmd(priv, 0x0000047f, 0x00000000);
+	nv_icmd(priv, 0x00000480, 0x00000000);
+	nv_icmd(priv, 0x00000481, 0x00000000);
+	nv_icmd(priv, 0x00000482, 0x00000000);
+	nv_icmd(priv, 0x00000483, 0x00000000);
+	nv_icmd(priv, 0x00000484, 0x00000000);
+	nv_icmd(priv, 0x00000485, 0x00000000);
+	nv_icmd(priv, 0x00000486, 0x00000000);
+	nv_icmd(priv, 0x00000487, 0x00000000);
+	nv_icmd(priv, 0x00000488, 0x00000000);
+	nv_icmd(priv, 0x00000489, 0x00000000);
+	nv_icmd(priv, 0x0000048a, 0x00000000);
+	nv_icmd(priv, 0x0000048b, 0x00000000);
+	nv_icmd(priv, 0x0000048c, 0x00000000);
+	nv_icmd(priv, 0x0000048d, 0x00000000);
+	nv_icmd(priv, 0x0000048e, 0x00000000);
+	nv_icmd(priv, 0x0000048f, 0x00000000);
+	nv_icmd(priv, 0x00000490, 0x00000000);
+	nv_icmd(priv, 0x00000491, 0x00000000);
+	nv_icmd(priv, 0x00000492, 0x00000000);
+	nv_icmd(priv, 0x00000493, 0x00000000);
+	nv_icmd(priv, 0x00000494, 0x00000000);
+	nv_icmd(priv, 0x00000495, 0x00000000);
+	nv_icmd(priv, 0x00000496, 0x00000000);
+	nv_icmd(priv, 0x00000497, 0x00000000);
+	nv_icmd(priv, 0x00000498, 0x00000000);
+	nv_icmd(priv, 0x00000499, 0x00000000);
+	nv_icmd(priv, 0x0000049a, 0x00000000);
+	nv_icmd(priv, 0x0000049b, 0x00000000);
+	nv_icmd(priv, 0x0000049c, 0x00000000);
+	nv_icmd(priv, 0x0000049d, 0x00000000);
+	nv_icmd(priv, 0x0000049e, 0x00000000);
+	nv_icmd(priv, 0x0000049f, 0x00000000);
+	nv_icmd(priv, 0x000004a0, 0x00000000);
+	nv_icmd(priv, 0x000004a1, 0x00000000);
+	nv_icmd(priv, 0x000004a2, 0x00000000);
+	nv_icmd(priv, 0x000004a3, 0x00000000);
+	nv_icmd(priv, 0x000004a4, 0x00000000);
+	nv_icmd(priv, 0x000004a5, 0x00000000);
+	nv_icmd(priv, 0x000004a6, 0x00000000);
+	nv_icmd(priv, 0x000004a7, 0x00000000);
+	nv_icmd(priv, 0x000004a8, 0x00000000);
+	nv_icmd(priv, 0x000004a9, 0x00000000);
+	nv_icmd(priv, 0x000004aa, 0x00000000);
+	nv_icmd(priv, 0x000004ab, 0x00000000);
+	nv_icmd(priv, 0x000004ac, 0x00000000);
+	nv_icmd(priv, 0x000004ad, 0x00000000);
+	nv_icmd(priv, 0x000004ae, 0x00000000);
+	nv_icmd(priv, 0x000004af, 0x00000000);
+	nv_icmd(priv, 0x000004b0, 0x00000000);
+	nv_icmd(priv, 0x000004b1, 0x00000000);
+	nv_icmd(priv, 0x000004b2, 0x00000000);
+	nv_icmd(priv, 0x000004b3, 0x00000000);
+	nv_icmd(priv, 0x000004b4, 0x00000000);
+	nv_icmd(priv, 0x000004b5, 0x00000000);
+	nv_icmd(priv, 0x000004b6, 0x00000000);
+	nv_icmd(priv, 0x000004b7, 0x00000000);
+	nv_icmd(priv, 0x000004b8, 0x00000000);
+	nv_icmd(priv, 0x000004b9, 0x00000000);
+	nv_icmd(priv, 0x000004ba, 0x00000000);
+	nv_icmd(priv, 0x000004bb, 0x00000000);
+	nv_icmd(priv, 0x000004bc, 0x00000000);
+	nv_icmd(priv, 0x000004bd, 0x00000000);
+	nv_icmd(priv, 0x000004be, 0x00000000);
+	nv_icmd(priv, 0x000004bf, 0x00000000);
+	nv_icmd(priv, 0x000004c0, 0x00000000);
+	nv_icmd(priv, 0x000004c1, 0x00000000);
+	nv_icmd(priv, 0x000004c2, 0x00000000);
+	nv_icmd(priv, 0x000004c3, 0x00000000);
+	nv_icmd(priv, 0x000004c4, 0x00000000);
+	nv_icmd(priv, 0x000004c5, 0x00000000);
+	nv_icmd(priv, 0x000004c6, 0x00000000);
+	nv_icmd(priv, 0x000004c7, 0x00000000);
+	nv_icmd(priv, 0x000004c8, 0x00000000);
+	nv_icmd(priv, 0x000004c9, 0x00000000);
+	nv_icmd(priv, 0x000004ca, 0x00000000);
+	nv_icmd(priv, 0x000004cb, 0x00000000);
+	nv_icmd(priv, 0x000004cc, 0x00000000);
+	nv_icmd(priv, 0x000004cd, 0x00000000);
+	nv_icmd(priv, 0x000004ce, 0x00000000);
+	nv_icmd(priv, 0x000004cf, 0x00000000);
+	nv_icmd(priv, 0x00000510, 0x3f800000);
+	nv_icmd(priv, 0x00000511, 0x3f800000);
+	nv_icmd(priv, 0x00000512, 0x3f800000);
+	nv_icmd(priv, 0x00000513, 0x3f800000);
+	nv_icmd(priv, 0x00000514, 0x3f800000);
+	nv_icmd(priv, 0x00000515, 0x3f800000);
+	nv_icmd(priv, 0x00000516, 0x3f800000);
+	nv_icmd(priv, 0x00000517, 0x3f800000);
+	nv_icmd(priv, 0x00000518, 0x3f800000);
+	nv_icmd(priv, 0x00000519, 0x3f800000);
+	nv_icmd(priv, 0x0000051a, 0x3f800000);
+	nv_icmd(priv, 0x0000051b, 0x3f800000);
+	nv_icmd(priv, 0x0000051c, 0x3f800000);
+	nv_icmd(priv, 0x0000051d, 0x3f800000);
+	nv_icmd(priv, 0x0000051e, 0x3f800000);
+	nv_icmd(priv, 0x0000051f, 0x3f800000);
+	nv_icmd(priv, 0x00000520, 0x000002b6);
+	nv_icmd(priv, 0x00000529, 0x00000001);
+	nv_icmd(priv, 0x00000530, 0xffff0000);
+	nv_icmd(priv, 0x00000531, 0xffff0000);
+	nv_icmd(priv, 0x00000532, 0xffff0000);
+	nv_icmd(priv, 0x00000533, 0xffff0000);
+	nv_icmd(priv, 0x00000534, 0xffff0000);
+	nv_icmd(priv, 0x00000535, 0xffff0000);
+	nv_icmd(priv, 0x00000536, 0xffff0000);
+	nv_icmd(priv, 0x00000537, 0xffff0000);
+	nv_icmd(priv, 0x00000538, 0xffff0000);
+	nv_icmd(priv, 0x00000539, 0xffff0000);
+	nv_icmd(priv, 0x0000053a, 0xffff0000);
+	nv_icmd(priv, 0x0000053b, 0xffff0000);
+	nv_icmd(priv, 0x0000053c, 0xffff0000);
+	nv_icmd(priv, 0x0000053d, 0xffff0000);
+	nv_icmd(priv, 0x0000053e, 0xffff0000);
+	nv_icmd(priv, 0x0000053f, 0xffff0000);
+	nv_icmd(priv, 0x00000585, 0x0000003f);
+	nv_icmd(priv, 0x00000576, 0x00000003);
+	if (nv_device(priv)->chipset == 0xc1 ||
+	    nv_device(priv)->chipset == 0xd9)
+		nv_icmd(priv, 0x0000057b, 0x00000059);
+	nv_icmd(priv, 0x00000586, 0x00000040);
+	nv_icmd(priv, 0x00000582, 0x00000080);
+	nv_icmd(priv, 0x00000583, 0x00000080);
+	nv_icmd(priv, 0x000005c2, 0x00000001);
+	nv_icmd(priv, 0x00000638, 0x00000001);
+	nv_icmd(priv, 0x00000639, 0x00000001);
+	nv_icmd(priv, 0x0000063a, 0x00000002);
+	nv_icmd(priv, 0x0000063b, 0x00000001);
+	nv_icmd(priv, 0x0000063c, 0x00000001);
+	nv_icmd(priv, 0x0000063d, 0x00000002);
+	nv_icmd(priv, 0x0000063e, 0x00000001);
+	nv_icmd(priv, 0x000008b8, 0x00000001);
+	nv_icmd(priv, 0x000008b9, 0x00000001);
+	nv_icmd(priv, 0x000008ba, 0x00000001);
+	nv_icmd(priv, 0x000008bb, 0x00000001);
+	nv_icmd(priv, 0x000008bc, 0x00000001);
+	nv_icmd(priv, 0x000008bd, 0x00000001);
+	nv_icmd(priv, 0x000008be, 0x00000001);
+	nv_icmd(priv, 0x000008bf, 0x00000001);
+	nv_icmd(priv, 0x00000900, 0x00000001);
+	nv_icmd(priv, 0x00000901, 0x00000001);
+	nv_icmd(priv, 0x00000902, 0x00000001);
+	nv_icmd(priv, 0x00000903, 0x00000001);
+	nv_icmd(priv, 0x00000904, 0x00000001);
+	nv_icmd(priv, 0x00000905, 0x00000001);
+	nv_icmd(priv, 0x00000906, 0x00000001);
+	nv_icmd(priv, 0x00000907, 0x00000001);
+	nv_icmd(priv, 0x00000908, 0x00000002);
+	nv_icmd(priv, 0x00000909, 0x00000002);
+	nv_icmd(priv, 0x0000090a, 0x00000002);
+	nv_icmd(priv, 0x0000090b, 0x00000002);
+	nv_icmd(priv, 0x0000090c, 0x00000002);
+	nv_icmd(priv, 0x0000090d, 0x00000002);
+	nv_icmd(priv, 0x0000090e, 0x00000002);
+	nv_icmd(priv, 0x0000090f, 0x00000002);
+	nv_icmd(priv, 0x00000910, 0x00000001);
+	nv_icmd(priv, 0x00000911, 0x00000001);
+	nv_icmd(priv, 0x00000912, 0x00000001);
+	nv_icmd(priv, 0x00000913, 0x00000001);
+	nv_icmd(priv, 0x00000914, 0x00000001);
+	nv_icmd(priv, 0x00000915, 0x00000001);
+	nv_icmd(priv, 0x00000916, 0x00000001);
+	nv_icmd(priv, 0x00000917, 0x00000001);
+	nv_icmd(priv, 0x00000918, 0x00000001);
+	nv_icmd(priv, 0x00000919, 0x00000001);
+	nv_icmd(priv, 0x0000091a, 0x00000001);
+	nv_icmd(priv, 0x0000091b, 0x00000001);
+	nv_icmd(priv, 0x0000091c, 0x00000001);
+	nv_icmd(priv, 0x0000091d, 0x00000001);
+	nv_icmd(priv, 0x0000091e, 0x00000001);
+	nv_icmd(priv, 0x0000091f, 0x00000001);
+	nv_icmd(priv, 0x00000920, 0x00000002);
+	nv_icmd(priv, 0x00000921, 0x00000002);
+	nv_icmd(priv, 0x00000922, 0x00000002);
+	nv_icmd(priv, 0x00000923, 0x00000002);
+	nv_icmd(priv, 0x00000924, 0x00000002);
+	nv_icmd(priv, 0x00000925, 0x00000002);
+	nv_icmd(priv, 0x00000926, 0x00000002);
+	nv_icmd(priv, 0x00000927, 0x00000002);
+	nv_icmd(priv, 0x00000928, 0x00000001);
+	nv_icmd(priv, 0x00000929, 0x00000001);
+	nv_icmd(priv, 0x0000092a, 0x00000001);
+	nv_icmd(priv, 0x0000092b, 0x00000001);
+	nv_icmd(priv, 0x0000092c, 0x00000001);
+	nv_icmd(priv, 0x0000092d, 0x00000001);
+	nv_icmd(priv, 0x0000092e, 0x00000001);
+	nv_icmd(priv, 0x0000092f, 0x00000001);
+	nv_icmd(priv, 0x00000648, 0x00000001);
+	nv_icmd(priv, 0x00000649, 0x00000001);
+	nv_icmd(priv, 0x0000064a, 0x00000001);
+	nv_icmd(priv, 0x0000064b, 0x00000001);
+	nv_icmd(priv, 0x0000064c, 0x00000001);
+	nv_icmd(priv, 0x0000064d, 0x00000001);
+	nv_icmd(priv, 0x0000064e, 0x00000001);
+	nv_icmd(priv, 0x0000064f, 0x00000001);
+	nv_icmd(priv, 0x00000650, 0x00000001);
+	nv_icmd(priv, 0x00000658, 0x0000000f);
+	nv_icmd(priv, 0x000007ff, 0x0000000a);
+	nv_icmd(priv, 0x0000066a, 0x40000000);
+	nv_icmd(priv, 0x0000066b, 0x10000000);
+	nv_icmd(priv, 0x0000066c, 0xffff0000);
+	nv_icmd(priv, 0x0000066d, 0xffff0000);
+	nv_icmd(priv, 0x000007af, 0x00000008);
+	nv_icmd(priv, 0x000007b0, 0x00000008);
+	nv_icmd(priv, 0x000007f6, 0x00000001);
+	nv_icmd(priv, 0x000006b2, 0x00000055);
+	nv_icmd(priv, 0x000007ad, 0x00000003);
+	nv_icmd(priv, 0x00000937, 0x00000001);
+	nv_icmd(priv, 0x00000971, 0x00000008);
+	nv_icmd(priv, 0x00000972, 0x00000040);
+	nv_icmd(priv, 0x00000973, 0x0000012c);
+	nv_icmd(priv, 0x0000097c, 0x00000040);
+	nv_icmd(priv, 0x00000979, 0x00000003);
+	nv_icmd(priv, 0x00000975, 0x00000020);
+	nv_icmd(priv, 0x00000976, 0x00000001);
+	nv_icmd(priv, 0x00000977, 0x00000020);
+	nv_icmd(priv, 0x00000978, 0x00000001);
+	nv_icmd(priv, 0x00000957, 0x00000003);
+	nv_icmd(priv, 0x0000095e, 0x20164010);
+	nv_icmd(priv, 0x0000095f, 0x00000020);
+	if (nv_device(priv)->chipset == 0xd9)
+		nv_icmd(priv, 0x0000097d, 0x00000020);
+	nv_icmd(priv, 0x00000683, 0x00000006);
+	nv_icmd(priv, 0x00000685, 0x003fffff);
+	nv_icmd(priv, 0x00000687, 0x00000c48);
+	nv_icmd(priv, 0x000006a0, 0x00000005);
+	nv_icmd(priv, 0x00000840, 0x00300008);
+	nv_icmd(priv, 0x00000841, 0x04000080);
+	nv_icmd(priv, 0x00000842, 0x00300008);
+	nv_icmd(priv, 0x00000843, 0x04000080);
+	nv_icmd(priv, 0x00000818, 0x00000000);
+	nv_icmd(priv, 0x00000819, 0x00000000);
+	nv_icmd(priv, 0x0000081a, 0x00000000);
+	nv_icmd(priv, 0x0000081b, 0x00000000);
+	nv_icmd(priv, 0x0000081c, 0x00000000);
+	nv_icmd(priv, 0x0000081d, 0x00000000);
+	nv_icmd(priv, 0x0000081e, 0x00000000);
+	nv_icmd(priv, 0x0000081f, 0x00000000);
+	nv_icmd(priv, 0x00000848, 0x00000000);
+	nv_icmd(priv, 0x00000849, 0x00000000);
+	nv_icmd(priv, 0x0000084a, 0x00000000);
+	nv_icmd(priv, 0x0000084b, 0x00000000);
+	nv_icmd(priv, 0x0000084c, 0x00000000);
+	nv_icmd(priv, 0x0000084d, 0x00000000);
+	nv_icmd(priv, 0x0000084e, 0x00000000);
+	nv_icmd(priv, 0x0000084f, 0x00000000);
+	nv_icmd(priv, 0x00000850, 0x00000000);
+	nv_icmd(priv, 0x00000851, 0x00000000);
+	nv_icmd(priv, 0x00000852, 0x00000000);
+	nv_icmd(priv, 0x00000853, 0x00000000);
+	nv_icmd(priv, 0x00000854, 0x00000000);
+	nv_icmd(priv, 0x00000855, 0x00000000);
+	nv_icmd(priv, 0x00000856, 0x00000000);
+	nv_icmd(priv, 0x00000857, 0x00000000);
+	nv_icmd(priv, 0x00000738, 0x00000000);
+	nv_icmd(priv, 0x000006aa, 0x00000001);
+	nv_icmd(priv, 0x000006ab, 0x00000002);
+	nv_icmd(priv, 0x000006ac, 0x00000080);
+	nv_icmd(priv, 0x000006ad, 0x00000100);
+	nv_icmd(priv, 0x000006ae, 0x00000100);
+	nv_icmd(priv, 0x000006b1, 0x00000011);
+	nv_icmd(priv, 0x000006bb, 0x000000cf);
+	nv_icmd(priv, 0x000006ce, 0x2a712488);
+	nv_icmd(priv, 0x00000739, 0x4085c000);
+	nv_icmd(priv, 0x0000073a, 0x00000080);
+	nv_icmd(priv, 0x00000786, 0x80000100);
+	nv_icmd(priv, 0x0000073c, 0x00010100);
+	nv_icmd(priv, 0x0000073d, 0x02800000);
+	nv_icmd(priv, 0x00000787, 0x000000cf);
+	nv_icmd(priv, 0x0000078c, 0x00000008);
+	nv_icmd(priv, 0x00000792, 0x00000001);
+	nv_icmd(priv, 0x00000794, 0x00000001);
+	nv_icmd(priv, 0x00000795, 0x00000001);
+	nv_icmd(priv, 0x00000796, 0x00000001);
+	nv_icmd(priv, 0x00000797, 0x000000cf);
+	nv_icmd(priv, 0x00000836, 0x00000001);
+	nv_icmd(priv, 0x0000079a, 0x00000002);
+	nv_icmd(priv, 0x00000833, 0x04444480);
+	nv_icmd(priv, 0x000007a1, 0x00000001);
+	nv_icmd(priv, 0x000007a3, 0x00000001);
+	nv_icmd(priv, 0x000007a4, 0x00000001);
+	nv_icmd(priv, 0x000007a5, 0x00000001);
+	nv_icmd(priv, 0x00000831, 0x00000004);
+	nv_icmd(priv, 0x0000080c, 0x00000002);
+	nv_icmd(priv, 0x0000080d, 0x00000100);
+	nv_icmd(priv, 0x0000080e, 0x00000100);
+	nv_icmd(priv, 0x0000080f, 0x00000001);
+	nv_icmd(priv, 0x00000823, 0x00000002);
+	nv_icmd(priv, 0x00000824, 0x00000100);
+	nv_icmd(priv, 0x00000825, 0x00000100);
+	nv_icmd(priv, 0x00000826, 0x00000001);
+	nv_icmd(priv, 0x0000095d, 0x00000001);
+	nv_icmd(priv, 0x0000082b, 0x00000004);
+	nv_icmd(priv, 0x00000942, 0x00010001);
+	nv_icmd(priv, 0x00000943, 0x00000001);
+	nv_icmd(priv, 0x00000944, 0x00000022);
+	nv_icmd(priv, 0x000007c5, 0x00010001);
+	nv_icmd(priv, 0x00000834, 0x00000001);
+	nv_icmd(priv, 0x000007c7, 0x00000001);
+	nv_icmd(priv, 0x0000c1b0, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b1, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b2, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b3, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b4, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b5, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b6, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b7, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b8, 0x0fac6881);
+	nv_icmd(priv, 0x0000c1b9, 0x00fac688);
+	nv_icmd(priv, 0x0001e100, 0x00000001);
+	nv_icmd(priv, 0x00001000, 0x00000002);
+	nv_icmd(priv, 0x000006aa, 0x00000001);
+	nv_icmd(priv, 0x000006ad, 0x00000100);
+	nv_icmd(priv, 0x000006ae, 0x00000100);
+	nv_icmd(priv, 0x000006b1, 0x00000011);
+	nv_icmd(priv, 0x0000078c, 0x00000008);
+	nv_icmd(priv, 0x00000792, 0x00000001);
+	nv_icmd(priv, 0x00000794, 0x00000001);
+	nv_icmd(priv, 0x00000795, 0x00000001);
+	nv_icmd(priv, 0x00000796, 0x00000001);
+	nv_icmd(priv, 0x00000797, 0x000000cf);
+	nv_icmd(priv, 0x0000079a, 0x00000002);
+	nv_icmd(priv, 0x00000833, 0x04444480);
+	nv_icmd(priv, 0x000007a1, 0x00000001);
+	nv_icmd(priv, 0x000007a3, 0x00000001);
+	nv_icmd(priv, 0x000007a4, 0x00000001);
+	nv_icmd(priv, 0x000007a5, 0x00000001);
+	nv_icmd(priv, 0x00000831, 0x00000004);
+	nv_icmd(priv, 0x0001e100, 0x00000001);
+	nv_icmd(priv, 0x00001000, 0x00000014);
+	nv_icmd(priv, 0x00000351, 0x00000100);
+	nv_icmd(priv, 0x00000957, 0x00000003);
+	nv_icmd(priv, 0x0000095d, 0x00000001);
+	nv_icmd(priv, 0x0000082b, 0x00000004);
+	nv_icmd(priv, 0x00000942, 0x00010001);
+	nv_icmd(priv, 0x00000943, 0x00000001);
+	nv_icmd(priv, 0x000007c5, 0x00010001);
+	nv_icmd(priv, 0x00000834, 0x00000001);
+	nv_icmd(priv, 0x000007c7, 0x00000001);
+	nv_icmd(priv, 0x0001e100, 0x00000001);
+	nv_icmd(priv, 0x00001000, 0x00000001);
+	nv_icmd(priv, 0x0000080c, 0x00000002);
+	nv_icmd(priv, 0x0000080d, 0x00000100);
+	nv_icmd(priv, 0x0000080e, 0x00000100);
+	nv_icmd(priv, 0x0000080f, 0x00000001);
+	nv_icmd(priv, 0x00000823, 0x00000002);
+	nv_icmd(priv, 0x00000824, 0x00000100);
+	nv_icmd(priv, 0x00000825, 0x00000100);
+	nv_icmd(priv, 0x00000826, 0x00000001);
+	nv_icmd(priv, 0x0001e100, 0x00000001);
+	nv_wr32(priv, 0x400208, 0x00000000);
+	nv_wr32(priv, 0x404154, 0x00000400);
+
+	nvc0_grctx_generate_9097(priv);
+	if (fermi >= 0x9197)
+		nvc0_grctx_generate_9197(priv);
+	if (fermi >= 0x9297)
+		nvc0_grctx_generate_9297(priv);
+	nvc0_grctx_generate_902d(priv);
+	nvc0_grctx_generate_9039(priv);
+	nvc0_grctx_generate_90c0(priv);
+
+	nv_wr32(priv, 0x000260, r000260);
+
+	return nvc0_grctx_fini(&info);
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
new file mode 100644
index 000000000000..6d8c63931ee6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -0,0 +1,2788 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+static void
+nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
+{
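+	/*
+	 * 0x400208 bit 31 appears to gate the ICMD interface: it is set before
+	 * the long table of nv_icmd() writes below and cleared again once the
+	 * initial state has been uploaded.
+	 */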
+	nv_wr32(priv, 0x400208, 0x80000000);
+	nv_icmd(priv, 0x001000, 0x00000004);
+	nv_icmd(priv, 0x000039, 0x00000000);
+	nv_icmd(priv, 0x00003a, 0x00000000);
+	nv_icmd(priv, 0x00003b, 0x00000000);
+	nv_icmd(priv, 0x0000a9, 0x0000ffff);
+	nv_icmd(priv, 0x000038, 0x0fac6881);
+	nv_icmd(priv, 0x00003d, 0x00000001);
+	nv_icmd(priv, 0x0000e8, 0x00000400);
+	nv_icmd(priv, 0x0000e9, 0x00000400);
+	nv_icmd(priv, 0x0000ea, 0x00000400);
+	nv_icmd(priv, 0x0000eb, 0x00000400);
+	nv_icmd(priv, 0x0000ec, 0x00000400);
+	nv_icmd(priv, 0x0000ed, 0x00000400);
+	nv_icmd(priv, 0x0000ee, 0x00000400);
+	nv_icmd(priv, 0x0000ef, 0x00000400);
+	nv_icmd(priv, 0x000078, 0x00000300);
+	nv_icmd(priv, 0x000079, 0x00000300);
+	nv_icmd(priv, 0x00007a, 0x00000300);
+	nv_icmd(priv, 0x00007b, 0x00000300);
+	nv_icmd(priv, 0x00007c, 0x00000300);
+	nv_icmd(priv, 0x00007d, 0x00000300);
+	nv_icmd(priv, 0x00007e, 0x00000300);
+	nv_icmd(priv, 0x00007f, 0x00000300);
+	nv_icmd(priv, 0x000050, 0x00000011);
+	nv_icmd(priv, 0x000058, 0x00000008);
+	nv_icmd(priv, 0x000059, 0x00000008);
+	nv_icmd(priv, 0x00005a, 0x00000008);
+	nv_icmd(priv, 0x00005b, 0x00000008);
+	nv_icmd(priv, 0x00005c, 0x00000008);
+	nv_icmd(priv, 0x00005d, 0x00000008);
+	nv_icmd(priv, 0x00005e, 0x00000008);
+	nv_icmd(priv, 0x00005f, 0x00000008);
+	nv_icmd(priv, 0x000208, 0x00000001);
+	nv_icmd(priv, 0x000209, 0x00000001);
+	nv_icmd(priv, 0x00020a, 0x00000001);
+	nv_icmd(priv, 0x00020b, 0x00000001);
+	nv_icmd(priv, 0x00020c, 0x00000001);
+	nv_icmd(priv, 0x00020d, 0x00000001);
+	nv_icmd(priv, 0x00020e, 0x00000001);
+	nv_icmd(priv, 0x00020f, 0x00000001);
+	nv_icmd(priv, 0x000081, 0x00000001);
+	nv_icmd(priv, 0x000085, 0x00000004);
+	nv_icmd(priv, 0x000088, 0x00000400);
+	nv_icmd(priv, 0x000090, 0x00000300);
+	nv_icmd(priv, 0x000098, 0x00001001);
+	nv_icmd(priv, 0x0000e3, 0x00000001);
+	nv_icmd(priv, 0x0000da, 0x00000001);
+	nv_icmd(priv, 0x0000f8, 0x00000003);
+	nv_icmd(priv, 0x0000fa, 0x00000001);
+	nv_icmd(priv, 0x00009f, 0x0000ffff);
+	nv_icmd(priv, 0x0000a0, 0x0000ffff);
+	nv_icmd(priv, 0x0000a1, 0x0000ffff);
+	nv_icmd(priv, 0x0000a2, 0x0000ffff);
+	nv_icmd(priv, 0x0000b1, 0x00000001);
+	nv_icmd(priv, 0x0000ad, 0x0000013e);
+	nv_icmd(priv, 0x0000e1, 0x00000010);
+	nv_icmd(priv, 0x000290, 0x00000000);
+	nv_icmd(priv, 0x000291, 0x00000000);
+	nv_icmd(priv, 0x000292, 0x00000000);
+	nv_icmd(priv, 0x000293, 0x00000000);
+	nv_icmd(priv, 0x000294, 0x00000000);
+	nv_icmd(priv, 0x000295, 0x00000000);
+	nv_icmd(priv, 0x000296, 0x00000000);
+	nv_icmd(priv, 0x000297, 0x00000000);
+	nv_icmd(priv, 0x000298, 0x00000000);
+	nv_icmd(priv, 0x000299, 0x00000000);
+	nv_icmd(priv, 0x00029a, 0x00000000);
+	nv_icmd(priv, 0x00029b, 0x00000000);
+	nv_icmd(priv, 0x00029c, 0x00000000);
+	nv_icmd(priv, 0x00029d, 0x00000000);
+	nv_icmd(priv, 0x00029e, 0x00000000);
+	nv_icmd(priv, 0x00029f, 0x00000000);
+	nv_icmd(priv, 0x0003b0, 0x00000000);
+	nv_icmd(priv, 0x0003b1, 0x00000000);
+	nv_icmd(priv, 0x0003b2, 0x00000000);
+	nv_icmd(priv, 0x0003b3, 0x00000000);
+	nv_icmd(priv, 0x0003b4, 0x00000000);
+	nv_icmd(priv, 0x0003b5, 0x00000000);
+	nv_icmd(priv, 0x0003b6, 0x00000000);
+	nv_icmd(priv, 0x0003b7, 0x00000000);
+	nv_icmd(priv, 0x0003b8, 0x00000000);
+	nv_icmd(priv, 0x0003b9, 0x00000000);
+	nv_icmd(priv, 0x0003ba, 0x00000000);
+	nv_icmd(priv, 0x0003bb, 0x00000000);
+	nv_icmd(priv, 0x0003bc, 0x00000000);
+	nv_icmd(priv, 0x0003bd, 0x00000000);
+	nv_icmd(priv, 0x0003be, 0x00000000);
+	nv_icmd(priv, 0x0003bf, 0x00000000);
+	nv_icmd(priv, 0x0002a0, 0x00000000);
+	nv_icmd(priv, 0x0002a1, 0x00000000);
+	nv_icmd(priv, 0x0002a2, 0x00000000);
+	nv_icmd(priv, 0x0002a3, 0x00000000);
+	nv_icmd(priv, 0x0002a4, 0x00000000);
+	nv_icmd(priv, 0x0002a5, 0x00000000);
+	nv_icmd(priv, 0x0002a6, 0x00000000);
+	nv_icmd(priv, 0x0002a7, 0x00000000);
+	nv_icmd(priv, 0x0002a8, 0x00000000);
+	nv_icmd(priv, 0x0002a9, 0x00000000);
+	nv_icmd(priv, 0x0002aa, 0x00000000);
+	nv_icmd(priv, 0x0002ab, 0x00000000);
+	nv_icmd(priv, 0x0002ac, 0x00000000);
+	nv_icmd(priv, 0x0002ad, 0x00000000);
+	nv_icmd(priv, 0x0002ae, 0x00000000);
+	nv_icmd(priv, 0x0002af, 0x00000000);
+	nv_icmd(priv, 0x000420, 0x00000000);
+	nv_icmd(priv, 0x000421, 0x00000000);
+	nv_icmd(priv, 0x000422, 0x00000000);
+	nv_icmd(priv, 0x000423, 0x00000000);
+	nv_icmd(priv, 0x000424, 0x00000000);
+	nv_icmd(priv, 0x000425, 0x00000000);
+	nv_icmd(priv, 0x000426, 0x00000000);
+	nv_icmd(priv, 0x000427, 0x00000000);
+	nv_icmd(priv, 0x000428, 0x00000000);
+	nv_icmd(priv, 0x000429, 0x00000000);
+	nv_icmd(priv, 0x00042a, 0x00000000);
+	nv_icmd(priv, 0x00042b, 0x00000000);
+	nv_icmd(priv, 0x00042c, 0x00000000);
+	nv_icmd(priv, 0x00042d, 0x00000000);
+	nv_icmd(priv, 0x00042e, 0x00000000);
+	nv_icmd(priv, 0x00042f, 0x00000000);
+	nv_icmd(priv, 0x0002b0, 0x00000000);
+	nv_icmd(priv, 0x0002b1, 0x00000000);
+	nv_icmd(priv, 0x0002b2, 0x00000000);
+	nv_icmd(priv, 0x0002b3, 0x00000000);
+	nv_icmd(priv, 0x0002b4, 0x00000000);
+	nv_icmd(priv, 0x0002b5, 0x00000000);
+	nv_icmd(priv, 0x0002b6, 0x00000000);
+	nv_icmd(priv, 0x0002b7, 0x00000000);
+	nv_icmd(priv, 0x0002b8, 0x00000000);
+	nv_icmd(priv, 0x0002b9, 0x00000000);
+	nv_icmd(priv, 0x0002ba, 0x00000000);
+	nv_icmd(priv, 0x0002bb, 0x00000000);
+	nv_icmd(priv, 0x0002bc, 0x00000000);
+	nv_icmd(priv, 0x0002bd, 0x00000000);
+	nv_icmd(priv, 0x0002be, 0x00000000);
+	nv_icmd(priv, 0x0002bf, 0x00000000);
+	nv_icmd(priv, 0x000430, 0x00000000);
+	nv_icmd(priv, 0x000431, 0x00000000);
+	nv_icmd(priv, 0x000432, 0x00000000);
+	nv_icmd(priv, 0x000433, 0x00000000);
+	nv_icmd(priv, 0x000434, 0x00000000);
+	nv_icmd(priv, 0x000435, 0x00000000);
+	nv_icmd(priv, 0x000436, 0x00000000);
+	nv_icmd(priv, 0x000437, 0x00000000);
+	nv_icmd(priv, 0x000438, 0x00000000);
+	nv_icmd(priv, 0x000439, 0x00000000);
+	nv_icmd(priv, 0x00043a, 0x00000000);
+	nv_icmd(priv, 0x00043b, 0x00000000);
+	nv_icmd(priv, 0x00043c, 0x00000000);
+	nv_icmd(priv, 0x00043d, 0x00000000);
+	nv_icmd(priv, 0x00043e, 0x00000000);
+	nv_icmd(priv, 0x00043f, 0x00000000);
+	nv_icmd(priv, 0x0002c0, 0x00000000);
+	nv_icmd(priv, 0x0002c1, 0x00000000);
+	nv_icmd(priv, 0x0002c2, 0x00000000);
+	nv_icmd(priv, 0x0002c3, 0x00000000);
+	nv_icmd(priv, 0x0002c4, 0x00000000);
+	nv_icmd(priv, 0x0002c5, 0x00000000);
+	nv_icmd(priv, 0x0002c6, 0x00000000);
+	nv_icmd(priv, 0x0002c7, 0x00000000);
+	nv_icmd(priv, 0x0002c8, 0x00000000);
+	nv_icmd(priv, 0x0002c9, 0x00000000);
+	nv_icmd(priv, 0x0002ca, 0x00000000);
+	nv_icmd(priv, 0x0002cb, 0x00000000);
+	nv_icmd(priv, 0x0002cc, 0x00000000);
+	nv_icmd(priv, 0x0002cd, 0x00000000);
+	nv_icmd(priv, 0x0002ce, 0x00000000);
+	nv_icmd(priv, 0x0002cf, 0x00000000);
+	nv_icmd(priv, 0x0004d0, 0x00000000);
+	nv_icmd(priv, 0x0004d1, 0x00000000);
+	nv_icmd(priv, 0x0004d2, 0x00000000);
+	nv_icmd(priv, 0x0004d3, 0x00000000);
+	nv_icmd(priv, 0x0004d4, 0x00000000);
+	nv_icmd(priv, 0x0004d5, 0x00000000);
+	nv_icmd(priv, 0x0004d6, 0x00000000);
+	nv_icmd(priv, 0x0004d7, 0x00000000);
+	nv_icmd(priv, 0x0004d8, 0x00000000);
+	nv_icmd(priv, 0x0004d9, 0x00000000);
+	nv_icmd(priv, 0x0004da, 0x00000000);
+	nv_icmd(priv, 0x0004db, 0x00000000);
+	nv_icmd(priv, 0x0004dc, 0x00000000);
+	nv_icmd(priv, 0x0004dd, 0x00000000);
+	nv_icmd(priv, 0x0004de, 0x00000000);
+	nv_icmd(priv, 0x0004df, 0x00000000);
+	nv_icmd(priv, 0x000720, 0x00000000);
+	nv_icmd(priv, 0x000721, 0x00000000);
+	nv_icmd(priv, 0x000722, 0x00000000);
+	nv_icmd(priv, 0x000723, 0x00000000);
+	nv_icmd(priv, 0x000724, 0x00000000);
+	nv_icmd(priv, 0x000725, 0x00000000);
+	nv_icmd(priv, 0x000726, 0x00000000);
+	nv_icmd(priv, 0x000727, 0x00000000);
+	nv_icmd(priv, 0x000728, 0x00000000);
+	nv_icmd(priv, 0x000729, 0x00000000);
+	nv_icmd(priv, 0x00072a, 0x00000000);
+	nv_icmd(priv, 0x00072b, 0x00000000);
+	nv_icmd(priv, 0x00072c, 0x00000000);
+	nv_icmd(priv, 0x00072d, 0x00000000);
+	nv_icmd(priv, 0x00072e, 0x00000000);
+	nv_icmd(priv, 0x00072f, 0x00000000);
+	nv_icmd(priv, 0x0008c0, 0x00000000);
+	nv_icmd(priv, 0x0008c1, 0x00000000);
+	nv_icmd(priv, 0x0008c2, 0x00000000);
+	nv_icmd(priv, 0x0008c3, 0x00000000);
+	nv_icmd(priv, 0x0008c4, 0x00000000);
+	nv_icmd(priv, 0x0008c5, 0x00000000);
+	nv_icmd(priv, 0x0008c6, 0x00000000);
+	nv_icmd(priv, 0x0008c7, 0x00000000);
+	nv_icmd(priv, 0x0008c8, 0x00000000);
+	nv_icmd(priv, 0x0008c9, 0x00000000);
+	nv_icmd(priv, 0x0008ca, 0x00000000);
+	nv_icmd(priv, 0x0008cb, 0x00000000);
+	nv_icmd(priv, 0x0008cc, 0x00000000);
+	nv_icmd(priv, 0x0008cd, 0x00000000);
+	nv_icmd(priv, 0x0008ce, 0x00000000);
+	nv_icmd(priv, 0x0008cf, 0x00000000);
+	nv_icmd(priv, 0x000890, 0x00000000);
+	nv_icmd(priv, 0x000891, 0x00000000);
+	nv_icmd(priv, 0x000892, 0x00000000);
+	nv_icmd(priv, 0x000893, 0x00000000);
+	nv_icmd(priv, 0x000894, 0x00000000);
+	nv_icmd(priv, 0x000895, 0x00000000);
+	nv_icmd(priv, 0x000896, 0x00000000);
+	nv_icmd(priv, 0x000897, 0x00000000);
+	nv_icmd(priv, 0x000898, 0x00000000);
+	nv_icmd(priv, 0x000899, 0x00000000);
+	nv_icmd(priv, 0x00089a, 0x00000000);
+	nv_icmd(priv, 0x00089b, 0x00000000);
+	nv_icmd(priv, 0x00089c, 0x00000000);
+	nv_icmd(priv, 0x00089d, 0x00000000);
+	nv_icmd(priv, 0x00089e, 0x00000000);
+	nv_icmd(priv, 0x00089f, 0x00000000);
+	nv_icmd(priv, 0x0008e0, 0x00000000);
+	nv_icmd(priv, 0x0008e1, 0x00000000);
+	nv_icmd(priv, 0x0008e2, 0x00000000);
+	nv_icmd(priv, 0x0008e3, 0x00000000);
+	nv_icmd(priv, 0x0008e4, 0x00000000);
+	nv_icmd(priv, 0x0008e5, 0x00000000);
+	nv_icmd(priv, 0x0008e6, 0x00000000);
+	nv_icmd(priv, 0x0008e7, 0x00000000);
+	nv_icmd(priv, 0x0008e8, 0x00000000);
+	nv_icmd(priv, 0x0008e9, 0x00000000);
+	nv_icmd(priv, 0x0008ea, 0x00000000);
+	nv_icmd(priv, 0x0008eb, 0x00000000);
+	nv_icmd(priv, 0x0008ec, 0x00000000);
+	nv_icmd(priv, 0x0008ed, 0x00000000);
+	nv_icmd(priv, 0x0008ee, 0x00000000);
+	nv_icmd(priv, 0x0008ef, 0x00000000);
+	nv_icmd(priv, 0x0008a0, 0x00000000);
+	nv_icmd(priv, 0x0008a1, 0x00000000);
+	nv_icmd(priv, 0x0008a2, 0x00000000);
+	nv_icmd(priv, 0x0008a3, 0x00000000);
+	nv_icmd(priv, 0x0008a4, 0x00000000);
+	nv_icmd(priv, 0x0008a5, 0x00000000);
+	nv_icmd(priv, 0x0008a6, 0x00000000);
+	nv_icmd(priv, 0x0008a7, 0x00000000);
+	nv_icmd(priv, 0x0008a8, 0x00000000);
+	nv_icmd(priv, 0x0008a9, 0x00000000);
+	nv_icmd(priv, 0x0008aa, 0x00000000);
+	nv_icmd(priv, 0x0008ab, 0x00000000);
+	nv_icmd(priv, 0x0008ac, 0x00000000);
+	nv_icmd(priv, 0x0008ad, 0x00000000);
+	nv_icmd(priv, 0x0008ae, 0x00000000);
+	nv_icmd(priv, 0x0008af, 0x00000000);
+	nv_icmd(priv, 0x0008f0, 0x00000000);
+	nv_icmd(priv, 0x0008f1, 0x00000000);
+	nv_icmd(priv, 0x0008f2, 0x00000000);
+	nv_icmd(priv, 0x0008f3, 0x00000000);
+	nv_icmd(priv, 0x0008f4, 0x00000000);
+	nv_icmd(priv, 0x0008f5, 0x00000000);
+	nv_icmd(priv, 0x0008f6, 0x00000000);
+	nv_icmd(priv, 0x0008f7, 0x00000000);
+	nv_icmd(priv, 0x0008f8, 0x00000000);
+	nv_icmd(priv, 0x0008f9, 0x00000000);
+	nv_icmd(priv, 0x0008fa, 0x00000000);
+	nv_icmd(priv, 0x0008fb, 0x00000000);
+	nv_icmd(priv, 0x0008fc, 0x00000000);
+	nv_icmd(priv, 0x0008fd, 0x00000000);
+	nv_icmd(priv, 0x0008fe, 0x00000000);
+	nv_icmd(priv, 0x0008ff, 0x00000000);
+	nv_icmd(priv, 0x00094c, 0x000000ff);
+	nv_icmd(priv, 0x00094d, 0xffffffff);
+	nv_icmd(priv, 0x00094e, 0x00000002);
+	nv_icmd(priv, 0x0002ec, 0x00000001);
+	nv_icmd(priv, 0x000303, 0x00000001);
+	nv_icmd(priv, 0x0002e6, 0x00000001);
+	nv_icmd(priv, 0x000466, 0x00000052);
+	nv_icmd(priv, 0x000301, 0x3f800000);
+	nv_icmd(priv, 0x000304, 0x30201000);
+	nv_icmd(priv, 0x000305, 0x70605040);
+	nv_icmd(priv, 0x000306, 0xb8a89888);
+	nv_icmd(priv, 0x000307, 0xf8e8d8c8);
+	nv_icmd(priv, 0x00030a, 0x00ffff00);
+	nv_icmd(priv, 0x00030b, 0x0000001a);
+	nv_icmd(priv, 0x00030c, 0x00000001);
+	nv_icmd(priv, 0x000318, 0x00000001);
+	nv_icmd(priv, 0x000340, 0x00000000);
+	nv_icmd(priv, 0x000375, 0x00000001);
+	nv_icmd(priv, 0x00037d, 0x00000006);
+	nv_icmd(priv, 0x0003a0, 0x00000002);
+	nv_icmd(priv, 0x0003aa, 0x00000001);
+	nv_icmd(priv, 0x0003a9, 0x00000001);
+	nv_icmd(priv, 0x000380, 0x00000001);
+	nv_icmd(priv, 0x000383, 0x00000011);
+	nv_icmd(priv, 0x000360, 0x00000040);
+	nv_icmd(priv, 0x000366, 0x00000000);
+	nv_icmd(priv, 0x000367, 0x00000000);
+	nv_icmd(priv, 0x000368, 0x00000fff);
+	nv_icmd(priv, 0x000370, 0x00000000);
+	nv_icmd(priv, 0x000371, 0x00000000);
+	nv_icmd(priv, 0x000372, 0x000fffff);
+	nv_icmd(priv, 0x00037a, 0x00000012);
+	nv_icmd(priv, 0x000619, 0x00000003);
+	nv_icmd(priv, 0x000811, 0x00000003);
+	nv_icmd(priv, 0x000812, 0x00000004);
+	nv_icmd(priv, 0x000813, 0x00000006);
+	nv_icmd(priv, 0x000814, 0x00000008);
+	nv_icmd(priv, 0x000815, 0x0000000b);
+	nv_icmd(priv, 0x000800, 0x00000001);
+	nv_icmd(priv, 0x000801, 0x00000001);
+	nv_icmd(priv, 0x000802, 0x00000001);
+	nv_icmd(priv, 0x000803, 0x00000001);
+	nv_icmd(priv, 0x000804, 0x00000001);
+	nv_icmd(priv, 0x000805, 0x00000001);
+	nv_icmd(priv, 0x000632, 0x00000001);
+	nv_icmd(priv, 0x000633, 0x00000002);
+	nv_icmd(priv, 0x000634, 0x00000003);
+	nv_icmd(priv, 0x000635, 0x00000004);
+	nv_icmd(priv, 0x000654, 0x3f800000);
+	nv_icmd(priv, 0x000657, 0x3f800000);
+	nv_icmd(priv, 0x000655, 0x3f800000);
+	nv_icmd(priv, 0x000656, 0x3f800000);
+	nv_icmd(priv, 0x0006cd, 0x3f800000);
+	nv_icmd(priv, 0x0007f5, 0x3f800000);
+	nv_icmd(priv, 0x0007dc, 0x39291909);
+	nv_icmd(priv, 0x0007dd, 0x79695949);
+	nv_icmd(priv, 0x0007de, 0xb9a99989);
+	nv_icmd(priv, 0x0007df, 0xf9e9d9c9);
+	nv_icmd(priv, 0x0007e8, 0x00003210);
+	nv_icmd(priv, 0x0007e9, 0x00007654);
+	nv_icmd(priv, 0x0007ea, 0x00000098);
+	nv_icmd(priv, 0x0007ec, 0x39291909);
+	nv_icmd(priv, 0x0007ed, 0x79695949);
+	nv_icmd(priv, 0x0007ee, 0xb9a99989);
+	nv_icmd(priv, 0x0007ef, 0xf9e9d9c9);
+	nv_icmd(priv, 0x0007f0, 0x00003210);
+	nv_icmd(priv, 0x0007f1, 0x00007654);
+	nv_icmd(priv, 0x0007f2, 0x00000098);
+	nv_icmd(priv, 0x0005a5, 0x00000001);
+	nv_icmd(priv, 0x000980, 0x00000000);
+	nv_icmd(priv, 0x000981, 0x00000000);
+	nv_icmd(priv, 0x000982, 0x00000000);
+	nv_icmd(priv, 0x000983, 0x00000000);
+	nv_icmd(priv, 0x000984, 0x00000000);
+	nv_icmd(priv, 0x000985, 0x00000000);
+	nv_icmd(priv, 0x000986, 0x00000000);
+	nv_icmd(priv, 0x000987, 0x00000000);
+	nv_icmd(priv, 0x000988, 0x00000000);
+	nv_icmd(priv, 0x000989, 0x00000000);
+	nv_icmd(priv, 0x00098a, 0x00000000);
+	nv_icmd(priv, 0x00098b, 0x00000000);
+	nv_icmd(priv, 0x00098c, 0x00000000);
+	nv_icmd(priv, 0x00098d, 0x00000000);
+	nv_icmd(priv, 0x00098e, 0x00000000);
+	nv_icmd(priv, 0x00098f, 0x00000000);
+	nv_icmd(priv, 0x000990, 0x00000000);
+	nv_icmd(priv, 0x000991, 0x00000000);
+	nv_icmd(priv, 0x000992, 0x00000000);
+	nv_icmd(priv, 0x000993, 0x00000000);
+	nv_icmd(priv, 0x000994, 0x00000000);
+	nv_icmd(priv, 0x000995, 0x00000000);
+	nv_icmd(priv, 0x000996, 0x00000000);
+	nv_icmd(priv, 0x000997, 0x00000000);
+	nv_icmd(priv, 0x000998, 0x00000000);
+	nv_icmd(priv, 0x000999, 0x00000000);
+	nv_icmd(priv, 0x00099a, 0x00000000);
+	nv_icmd(priv, 0x00099b, 0x00000000);
+	nv_icmd(priv, 0x00099c, 0x00000000);
+	nv_icmd(priv, 0x00099d, 0x00000000);
+	nv_icmd(priv, 0x00099e, 0x00000000);
+	nv_icmd(priv, 0x00099f, 0x00000000);
+	nv_icmd(priv, 0x0009a0, 0x00000000);
+	nv_icmd(priv, 0x0009a1, 0x00000000);
+	nv_icmd(priv, 0x0009a2, 0x00000000);
+	nv_icmd(priv, 0x0009a3, 0x00000000);
+	nv_icmd(priv, 0x0009a4, 0x00000000);
+	nv_icmd(priv, 0x0009a5, 0x00000000);
+	nv_icmd(priv, 0x0009a6, 0x00000000);
+	nv_icmd(priv, 0x0009a7, 0x00000000);
+	nv_icmd(priv, 0x0009a8, 0x00000000);
+	nv_icmd(priv, 0x0009a9, 0x00000000);
+	nv_icmd(priv, 0x0009aa, 0x00000000);
+	nv_icmd(priv, 0x0009ab, 0x00000000);
+	nv_icmd(priv, 0x0009ac, 0x00000000);
+	nv_icmd(priv, 0x0009ad, 0x00000000);
+	nv_icmd(priv, 0x0009ae, 0x00000000);
+	nv_icmd(priv, 0x0009af, 0x00000000);
+	nv_icmd(priv, 0x0009b0, 0x00000000);
+	nv_icmd(priv, 0x0009b1, 0x00000000);
+	nv_icmd(priv, 0x0009b2, 0x00000000);
+	nv_icmd(priv, 0x0009b3, 0x00000000);
+	nv_icmd(priv, 0x0009b4, 0x00000000);
+	nv_icmd(priv, 0x0009b5, 0x00000000);
+	nv_icmd(priv, 0x0009b6, 0x00000000);
+	nv_icmd(priv, 0x0009b7, 0x00000000);
+	nv_icmd(priv, 0x0009b8, 0x00000000);
+	nv_icmd(priv, 0x0009b9, 0x00000000);
+	nv_icmd(priv, 0x0009ba, 0x00000000);
+	nv_icmd(priv, 0x0009bb, 0x00000000);
+	nv_icmd(priv, 0x0009bc, 0x00000000);
+	nv_icmd(priv, 0x0009bd, 0x00000000);
+	nv_icmd(priv, 0x0009be, 0x00000000);
+	nv_icmd(priv, 0x0009bf, 0x00000000);
+	nv_icmd(priv, 0x0009c0, 0x00000000);
+	nv_icmd(priv, 0x0009c1, 0x00000000);
+	nv_icmd(priv, 0x0009c2, 0x00000000);
+	nv_icmd(priv, 0x0009c3, 0x00000000);
+	nv_icmd(priv, 0x0009c4, 0x00000000);
+	nv_icmd(priv, 0x0009c5, 0x00000000);
+	nv_icmd(priv, 0x0009c6, 0x00000000);
+	nv_icmd(priv, 0x0009c7, 0x00000000);
+	nv_icmd(priv, 0x0009c8, 0x00000000);
+	nv_icmd(priv, 0x0009c9, 0x00000000);
+	nv_icmd(priv, 0x0009ca, 0x00000000);
+	nv_icmd(priv, 0x0009cb, 0x00000000);
+	nv_icmd(priv, 0x0009cc, 0x00000000);
+	nv_icmd(priv, 0x0009cd, 0x00000000);
+	nv_icmd(priv, 0x0009ce, 0x00000000);
+	nv_icmd(priv, 0x0009cf, 0x00000000);
+	nv_icmd(priv, 0x0009d0, 0x00000000);
+	nv_icmd(priv, 0x0009d1, 0x00000000);
+	nv_icmd(priv, 0x0009d2, 0x00000000);
+	nv_icmd(priv, 0x0009d3, 0x00000000);
+	nv_icmd(priv, 0x0009d4, 0x00000000);
+	nv_icmd(priv, 0x0009d5, 0x00000000);
+	nv_icmd(priv, 0x0009d6, 0x00000000);
+	nv_icmd(priv, 0x0009d7, 0x00000000);
+	nv_icmd(priv, 0x0009d8, 0x00000000);
+	nv_icmd(priv, 0x0009d9, 0x00000000);
+	nv_icmd(priv, 0x0009da, 0x00000000);
+	nv_icmd(priv, 0x0009db, 0x00000000);
+	nv_icmd(priv, 0x0009dc, 0x00000000);
+	nv_icmd(priv, 0x0009dd, 0x00000000);
+	nv_icmd(priv, 0x0009de, 0x00000000);
+	nv_icmd(priv, 0x0009df, 0x00000000);
+	nv_icmd(priv, 0x0009e0, 0x00000000);
+	nv_icmd(priv, 0x0009e1, 0x00000000);
+	nv_icmd(priv, 0x0009e2, 0x00000000);
+	nv_icmd(priv, 0x0009e3, 0x00000000);
+	nv_icmd(priv, 0x0009e4, 0x00000000);
+	nv_icmd(priv, 0x0009e5, 0x00000000);
+	nv_icmd(priv, 0x0009e6, 0x00000000);
+	nv_icmd(priv, 0x0009e7, 0x00000000);
+	nv_icmd(priv, 0x0009e8, 0x00000000);
+	nv_icmd(priv, 0x0009e9, 0x00000000);
+	nv_icmd(priv, 0x0009ea, 0x00000000);
+	nv_icmd(priv, 0x0009eb, 0x00000000);
+	nv_icmd(priv, 0x0009ec, 0x00000000);
+	nv_icmd(priv, 0x0009ed, 0x00000000);
+	nv_icmd(priv, 0x0009ee, 0x00000000);
+	nv_icmd(priv, 0x0009ef, 0x00000000);
+	nv_icmd(priv, 0x0009f0, 0x00000000);
+	nv_icmd(priv, 0x0009f1, 0x00000000);
+	nv_icmd(priv, 0x0009f2, 0x00000000);
+	nv_icmd(priv, 0x0009f3, 0x00000000);
+	nv_icmd(priv, 0x0009f4, 0x00000000);
+	nv_icmd(priv, 0x0009f5, 0x00000000);
+	nv_icmd(priv, 0x0009f6, 0x00000000);
+	nv_icmd(priv, 0x0009f7, 0x00000000);
+	nv_icmd(priv, 0x0009f8, 0x00000000);
+	nv_icmd(priv, 0x0009f9, 0x00000000);
+	nv_icmd(priv, 0x0009fa, 0x00000000);
+	nv_icmd(priv, 0x0009fb, 0x00000000);
+	nv_icmd(priv, 0x0009fc, 0x00000000);
+	nv_icmd(priv, 0x0009fd, 0x00000000);
+	nv_icmd(priv, 0x0009fe, 0x00000000);
+	nv_icmd(priv, 0x0009ff, 0x00000000);
+	nv_icmd(priv, 0x000468, 0x00000004);
+	nv_icmd(priv, 0x00046c, 0x00000001);
+	nv_icmd(priv, 0x000470, 0x00000000);
+	nv_icmd(priv, 0x000471, 0x00000000);
+	nv_icmd(priv, 0x000472, 0x00000000);
+	nv_icmd(priv, 0x000473, 0x00000000);
+	nv_icmd(priv, 0x000474, 0x00000000);
+	nv_icmd(priv, 0x000475, 0x00000000);
+	nv_icmd(priv, 0x000476, 0x00000000);
+	nv_icmd(priv, 0x000477, 0x00000000);
+	nv_icmd(priv, 0x000478, 0x00000000);
+	nv_icmd(priv, 0x000479, 0x00000000);
+	nv_icmd(priv, 0x00047a, 0x00000000);
+	nv_icmd(priv, 0x00047b, 0x00000000);
+	nv_icmd(priv, 0x00047c, 0x00000000);
+	nv_icmd(priv, 0x00047d, 0x00000000);
+	nv_icmd(priv, 0x00047e, 0x00000000);
+	nv_icmd(priv, 0x00047f, 0x00000000);
+	nv_icmd(priv, 0x000480, 0x00000000);
+	nv_icmd(priv, 0x000481, 0x00000000);
+	nv_icmd(priv, 0x000482, 0x00000000);
+	nv_icmd(priv, 0x000483, 0x00000000);
+	nv_icmd(priv, 0x000484, 0x00000000);
+	nv_icmd(priv, 0x000485, 0x00000000);
+	nv_icmd(priv, 0x000486, 0x00000000);
+	nv_icmd(priv, 0x000487, 0x00000000);
+	nv_icmd(priv, 0x000488, 0x00000000);
+	nv_icmd(priv, 0x000489, 0x00000000);
+	nv_icmd(priv, 0x00048a, 0x00000000);
+	nv_icmd(priv, 0x00048b, 0x00000000);
+	nv_icmd(priv, 0x00048c, 0x00000000);
+	nv_icmd(priv, 0x00048d, 0x00000000);
+	nv_icmd(priv, 0x00048e, 0x00000000);
+	nv_icmd(priv, 0x00048f, 0x00000000);
+	nv_icmd(priv, 0x000490, 0x00000000);
+	nv_icmd(priv, 0x000491, 0x00000000);
+	nv_icmd(priv, 0x000492, 0x00000000);
+	nv_icmd(priv, 0x000493, 0x00000000);
+	nv_icmd(priv, 0x000494, 0x00000000);
+	nv_icmd(priv, 0x000495, 0x00000000);
+	nv_icmd(priv, 0x000496, 0x00000000);
+	nv_icmd(priv, 0x000497, 0x00000000);
+	nv_icmd(priv, 0x000498, 0x00000000);
+	nv_icmd(priv, 0x000499, 0x00000000);
+	nv_icmd(priv, 0x00049a, 0x00000000);
+	nv_icmd(priv, 0x00049b, 0x00000000);
+	nv_icmd(priv, 0x00049c, 0x00000000);
+	nv_icmd(priv, 0x00049d, 0x00000000);
+	nv_icmd(priv, 0x00049e, 0x00000000);
+	nv_icmd(priv, 0x00049f, 0x00000000);
+	nv_icmd(priv, 0x0004a0, 0x00000000);
+	nv_icmd(priv, 0x0004a1, 0x00000000);
+	nv_icmd(priv, 0x0004a2, 0x00000000);
+	nv_icmd(priv, 0x0004a3, 0x00000000);
+	nv_icmd(priv, 0x0004a4, 0x00000000);
+	nv_icmd(priv, 0x0004a5, 0x00000000);
+	nv_icmd(priv, 0x0004a6, 0x00000000);
+	nv_icmd(priv, 0x0004a7, 0x00000000);
+	nv_icmd(priv, 0x0004a8, 0x00000000);
+	nv_icmd(priv, 0x0004a9, 0x00000000);
+	nv_icmd(priv, 0x0004aa, 0x00000000);
+	nv_icmd(priv, 0x0004ab, 0x00000000);
+	nv_icmd(priv, 0x0004ac, 0x00000000);
+	nv_icmd(priv, 0x0004ad, 0x00000000);
+	nv_icmd(priv, 0x0004ae, 0x00000000);
+	nv_icmd(priv, 0x0004af, 0x00000000);
+	nv_icmd(priv, 0x0004b0, 0x00000000);
+	nv_icmd(priv, 0x0004b1, 0x00000000);
+	nv_icmd(priv, 0x0004b2, 0x00000000);
+	nv_icmd(priv, 0x0004b3, 0x00000000);
+	nv_icmd(priv, 0x0004b4, 0x00000000);
+	nv_icmd(priv, 0x0004b5, 0x00000000);
+	nv_icmd(priv, 0x0004b6, 0x00000000);
+	nv_icmd(priv, 0x0004b7, 0x00000000);
+	nv_icmd(priv, 0x0004b8, 0x00000000);
+	nv_icmd(priv, 0x0004b9, 0x00000000);
+	nv_icmd(priv, 0x0004ba, 0x00000000);
+	nv_icmd(priv, 0x0004bb, 0x00000000);
+	nv_icmd(priv, 0x0004bc, 0x00000000);
+	nv_icmd(priv, 0x0004bd, 0x00000000);
+	nv_icmd(priv, 0x0004be, 0x00000000);
+	nv_icmd(priv, 0x0004bf, 0x00000000);
+	nv_icmd(priv, 0x0004c0, 0x00000000);
+	nv_icmd(priv, 0x0004c1, 0x00000000);
+	nv_icmd(priv, 0x0004c2, 0x00000000);
+	nv_icmd(priv, 0x0004c3, 0x00000000);
+	nv_icmd(priv, 0x0004c4, 0x00000000);
+	nv_icmd(priv, 0x0004c5, 0x00000000);
+	nv_icmd(priv, 0x0004c6, 0x00000000);
+	nv_icmd(priv, 0x0004c7, 0x00000000);
+	nv_icmd(priv, 0x0004c8, 0x00000000);
+	nv_icmd(priv, 0x0004c9, 0x00000000);
+	nv_icmd(priv, 0x0004ca, 0x00000000);
+	nv_icmd(priv, 0x0004cb, 0x00000000);
+	nv_icmd(priv, 0x0004cc, 0x00000000);
+	nv_icmd(priv, 0x0004cd, 0x00000000);
+	nv_icmd(priv, 0x0004ce, 0x00000000);
+	nv_icmd(priv, 0x0004cf, 0x00000000);
+	nv_icmd(priv, 0x000510, 0x3f800000);
+	nv_icmd(priv, 0x000511, 0x3f800000);
+	nv_icmd(priv, 0x000512, 0x3f800000);
+	nv_icmd(priv, 0x000513, 0x3f800000);
+	nv_icmd(priv, 0x000514, 0x3f800000);
+	nv_icmd(priv, 0x000515, 0x3f800000);
+	nv_icmd(priv, 0x000516, 0x3f800000);
+	nv_icmd(priv, 0x000517, 0x3f800000);
+	nv_icmd(priv, 0x000518, 0x3f800000);
+	nv_icmd(priv, 0x000519, 0x3f800000);
+	nv_icmd(priv, 0x00051a, 0x3f800000);
+	nv_icmd(priv, 0x00051b, 0x3f800000);
+	nv_icmd(priv, 0x00051c, 0x3f800000);
+	nv_icmd(priv, 0x00051d, 0x3f800000);
+	nv_icmd(priv, 0x00051e, 0x3f800000);
+	nv_icmd(priv, 0x00051f, 0x3f800000);
+	nv_icmd(priv, 0x000520, 0x000002b6);
+	nv_icmd(priv, 0x000529, 0x00000001);
+	nv_icmd(priv, 0x000530, 0xffff0000);
+	nv_icmd(priv, 0x000531, 0xffff0000);
+	nv_icmd(priv, 0x000532, 0xffff0000);
+	nv_icmd(priv, 0x000533, 0xffff0000);
+	nv_icmd(priv, 0x000534, 0xffff0000);
+	nv_icmd(priv, 0x000535, 0xffff0000);
+	nv_icmd(priv, 0x000536, 0xffff0000);
+	nv_icmd(priv, 0x000537, 0xffff0000);
+	nv_icmd(priv, 0x000538, 0xffff0000);
+	nv_icmd(priv, 0x000539, 0xffff0000);
+	nv_icmd(priv, 0x00053a, 0xffff0000);
+	nv_icmd(priv, 0x00053b, 0xffff0000);
+	nv_icmd(priv, 0x00053c, 0xffff0000);
+	nv_icmd(priv, 0x00053d, 0xffff0000);
+	nv_icmd(priv, 0x00053e, 0xffff0000);
+	nv_icmd(priv, 0x00053f, 0xffff0000);
+	nv_icmd(priv, 0x000585, 0x0000003f);
+	nv_icmd(priv, 0x000576, 0x00000003);
+	nv_icmd(priv, 0x00057b, 0x00000059);
+	nv_icmd(priv, 0x000586, 0x00000040);
+	nv_icmd(priv, 0x000582, 0x00000080);
+	nv_icmd(priv, 0x000583, 0x00000080);
+	nv_icmd(priv, 0x0005c2, 0x00000001);
+	nv_icmd(priv, 0x000638, 0x00000001);
+	nv_icmd(priv, 0x000639, 0x00000001);
+	nv_icmd(priv, 0x00063a, 0x00000002);
+	nv_icmd(priv, 0x00063b, 0x00000001);
+	nv_icmd(priv, 0x00063c, 0x00000001);
+	nv_icmd(priv, 0x00063d, 0x00000002);
+	nv_icmd(priv, 0x00063e, 0x00000001);
+	nv_icmd(priv, 0x0008b8, 0x00000001);
+	nv_icmd(priv, 0x0008b9, 0x00000001);
+	nv_icmd(priv, 0x0008ba, 0x00000001);
+	nv_icmd(priv, 0x0008bb, 0x00000001);
+	nv_icmd(priv, 0x0008bc, 0x00000001);
+	nv_icmd(priv, 0x0008bd, 0x00000001);
+	nv_icmd(priv, 0x0008be, 0x00000001);
+	nv_icmd(priv, 0x0008bf, 0x00000001);
+	nv_icmd(priv, 0x000900, 0x00000001);
+	nv_icmd(priv, 0x000901, 0x00000001);
+	nv_icmd(priv, 0x000902, 0x00000001);
+	nv_icmd(priv, 0x000903, 0x00000001);
+	nv_icmd(priv, 0x000904, 0x00000001);
+	nv_icmd(priv, 0x000905, 0x00000001);
+	nv_icmd(priv, 0x000906, 0x00000001);
+	nv_icmd(priv, 0x000907, 0x00000001);
+	nv_icmd(priv, 0x000908, 0x00000002);
+	nv_icmd(priv, 0x000909, 0x00000002);
+	nv_icmd(priv, 0x00090a, 0x00000002);
+	nv_icmd(priv, 0x00090b, 0x00000002);
+	nv_icmd(priv, 0x00090c, 0x00000002);
+	nv_icmd(priv, 0x00090d, 0x00000002);
+	nv_icmd(priv, 0x00090e, 0x00000002);
+	nv_icmd(priv, 0x00090f, 0x00000002);
+	nv_icmd(priv, 0x000910, 0x00000001);
+	nv_icmd(priv, 0x000911, 0x00000001);
+	nv_icmd(priv, 0x000912, 0x00000001);
+	nv_icmd(priv, 0x000913, 0x00000001);
+	nv_icmd(priv, 0x000914, 0x00000001);
+	nv_icmd(priv, 0x000915, 0x00000001);
+	nv_icmd(priv, 0x000916, 0x00000001);
+	nv_icmd(priv, 0x000917, 0x00000001);
+	nv_icmd(priv, 0x000918, 0x00000001);
+	nv_icmd(priv, 0x000919, 0x00000001);
+	nv_icmd(priv, 0x00091a, 0x00000001);
+	nv_icmd(priv, 0x00091b, 0x00000001);
+	nv_icmd(priv, 0x00091c, 0x00000001);
+	nv_icmd(priv, 0x00091d, 0x00000001);
+	nv_icmd(priv, 0x00091e, 0x00000001);
+	nv_icmd(priv, 0x00091f, 0x00000001);
+	nv_icmd(priv, 0x000920, 0x00000002);
+	nv_icmd(priv, 0x000921, 0x00000002);
+	nv_icmd(priv, 0x000922, 0x00000002);
+	nv_icmd(priv, 0x000923, 0x00000002);
+	nv_icmd(priv, 0x000924, 0x00000002);
+	nv_icmd(priv, 0x000925, 0x00000002);
+	nv_icmd(priv, 0x000926, 0x00000002);
+	nv_icmd(priv, 0x000927, 0x00000002);
+	nv_icmd(priv, 0x000928, 0x00000001);
+	nv_icmd(priv, 0x000929, 0x00000001);
+	nv_icmd(priv, 0x00092a, 0x00000001);
+	nv_icmd(priv, 0x00092b, 0x00000001);
+	nv_icmd(priv, 0x00092c, 0x00000001);
+	nv_icmd(priv, 0x00092d, 0x00000001);
+	nv_icmd(priv, 0x00092e, 0x00000001);
+	nv_icmd(priv, 0x00092f, 0x00000001);
+	nv_icmd(priv, 0x000648, 0x00000001);
+	nv_icmd(priv, 0x000649, 0x00000001);
+	nv_icmd(priv, 0x00064a, 0x00000001);
+	nv_icmd(priv, 0x00064b, 0x00000001);
+	nv_icmd(priv, 0x00064c, 0x00000001);
+	nv_icmd(priv, 0x00064d, 0x00000001);
+	nv_icmd(priv, 0x00064e, 0x00000001);
+	nv_icmd(priv, 0x00064f, 0x00000001);
+	nv_icmd(priv, 0x000650, 0x00000001);
+	nv_icmd(priv, 0x000658, 0x0000000f);
+	nv_icmd(priv, 0x0007ff, 0x0000000a);
+	nv_icmd(priv, 0x00066a, 0x40000000);
+	nv_icmd(priv, 0x00066b, 0x10000000);
+	nv_icmd(priv, 0x00066c, 0xffff0000);
+	nv_icmd(priv, 0x00066d, 0xffff0000);
+	nv_icmd(priv, 0x0007af, 0x00000008);
+	nv_icmd(priv, 0x0007b0, 0x00000008);
+	nv_icmd(priv, 0x0007f6, 0x00000001);
+	nv_icmd(priv, 0x0006b2, 0x00000055);
+	nv_icmd(priv, 0x0007ad, 0x00000003);
+	nv_icmd(priv, 0x000937, 0x00000001);
+	nv_icmd(priv, 0x000971, 0x00000008);
+	nv_icmd(priv, 0x000972, 0x00000040);
+	nv_icmd(priv, 0x000973, 0x0000012c);
+	nv_icmd(priv, 0x00097c, 0x00000040);
+	nv_icmd(priv, 0x000979, 0x00000003);
+	nv_icmd(priv, 0x000975, 0x00000020);
+	nv_icmd(priv, 0x000976, 0x00000001);
+	nv_icmd(priv, 0x000977, 0x00000020);
+	nv_icmd(priv, 0x000978, 0x00000001);
+	nv_icmd(priv, 0x000957, 0x00000003);
+	nv_icmd(priv, 0x00095e, 0x20164010);
+	nv_icmd(priv, 0x00095f, 0x00000020);
+	nv_icmd(priv, 0x00097d, 0x00000020);
+	nv_icmd(priv, 0x000683, 0x00000006);
+	nv_icmd(priv, 0x000685, 0x003fffff);
+	nv_icmd(priv, 0x000687, 0x003fffff);
+	nv_icmd(priv, 0x0006a0, 0x00000005);
+	nv_icmd(priv, 0x000840, 0x00400008);
+	nv_icmd(priv, 0x000841, 0x08000080);
+	nv_icmd(priv, 0x000842, 0x00400008);
+	nv_icmd(priv, 0x000843, 0x08000080);
+	nv_icmd(priv, 0x000818, 0x00000000);
+	nv_icmd(priv, 0x000819, 0x00000000);
+	nv_icmd(priv, 0x00081a, 0x00000000);
+	nv_icmd(priv, 0x00081b, 0x00000000);
+	nv_icmd(priv, 0x00081c, 0x00000000);
+	nv_icmd(priv, 0x00081d, 0x00000000);
+	nv_icmd(priv, 0x00081e, 0x00000000);
+	nv_icmd(priv, 0x00081f, 0x00000000);
+	nv_icmd(priv, 0x000848, 0x00000000);
+	nv_icmd(priv, 0x000849, 0x00000000);
+	nv_icmd(priv, 0x00084a, 0x00000000);
+	nv_icmd(priv, 0x00084b, 0x00000000);
+	nv_icmd(priv, 0x00084c, 0x00000000);
+	nv_icmd(priv, 0x00084d, 0x00000000);
+	nv_icmd(priv, 0x00084e, 0x00000000);
+	nv_icmd(priv, 0x00084f, 0x00000000);
+	nv_icmd(priv, 0x000850, 0x00000000);
+	nv_icmd(priv, 0x000851, 0x00000000);
+	nv_icmd(priv, 0x000852, 0x00000000);
+	nv_icmd(priv, 0x000853, 0x00000000);
+	nv_icmd(priv, 0x000854, 0x00000000);
+	nv_icmd(priv, 0x000855, 0x00000000);
+	nv_icmd(priv, 0x000856, 0x00000000);
+	nv_icmd(priv, 0x000857, 0x00000000);
+	nv_icmd(priv, 0x000738, 0x00000000);
+	nv_icmd(priv, 0x0006aa, 0x00000001);
+	nv_icmd(priv, 0x0006ab, 0x00000002);
+	nv_icmd(priv, 0x0006ac, 0x00000080);
+	nv_icmd(priv, 0x0006ad, 0x00000100);
+	nv_icmd(priv, 0x0006ae, 0x00000100);
+	nv_icmd(priv, 0x0006b1, 0x00000011);
+	nv_icmd(priv, 0x0006bb, 0x000000cf);
+	nv_icmd(priv, 0x0006ce, 0x2a712488);
+	nv_icmd(priv, 0x000739, 0x4085c000);
+	nv_icmd(priv, 0x00073a, 0x00000080);
+	nv_icmd(priv, 0x000786, 0x80000100);
+	nv_icmd(priv, 0x00073c, 0x00010100);
+	nv_icmd(priv, 0x00073d, 0x02800000);
+	nv_icmd(priv, 0x000787, 0x000000cf);
+	nv_icmd(priv, 0x00078c, 0x00000008);
+	nv_icmd(priv, 0x000792, 0x00000001);
+	nv_icmd(priv, 0x000794, 0x00000001);
+	nv_icmd(priv, 0x000795, 0x00000001);
+	nv_icmd(priv, 0x000796, 0x00000001);
+	nv_icmd(priv, 0x000797, 0x000000cf);
+	nv_icmd(priv, 0x000836, 0x00000001);
+	nv_icmd(priv, 0x00079a, 0x00000002);
+	nv_icmd(priv, 0x000833, 0x04444480);
+	nv_icmd(priv, 0x0007a1, 0x00000001);
+	nv_icmd(priv, 0x0007a3, 0x00000001);
+	nv_icmd(priv, 0x0007a4, 0x00000001);
+	nv_icmd(priv, 0x0007a5, 0x00000001);
+	nv_icmd(priv, 0x000831, 0x00000004);
+	nv_icmd(priv, 0x000b07, 0x00000002);
+	nv_icmd(priv, 0x000b08, 0x00000100);
+	nv_icmd(priv, 0x000b09, 0x00000100);
+	nv_icmd(priv, 0x000b0a, 0x00000001);
+	nv_icmd(priv, 0x000a04, 0x000000ff);
+	nv_icmd(priv, 0x000a0b, 0x00000040);
+	nv_icmd(priv, 0x00097f, 0x00000100);
+	nv_icmd(priv, 0x000a02, 0x00000001);
+	nv_icmd(priv, 0x000809, 0x00000007);
+	nv_icmd(priv, 0x00c221, 0x00000040);
+	nv_icmd(priv, 0x00c1b0, 0x0000000f);
+	nv_icmd(priv, 0x00c1b1, 0x0000000f);
+	nv_icmd(priv, 0x00c1b2, 0x0000000f);
+	nv_icmd(priv, 0x00c1b3, 0x0000000f);
+	nv_icmd(priv, 0x00c1b4, 0x0000000f);
+	nv_icmd(priv, 0x00c1b5, 0x0000000f);
+	nv_icmd(priv, 0x00c1b6, 0x0000000f);
+	nv_icmd(priv, 0x00c1b7, 0x0000000f);
+	nv_icmd(priv, 0x00c1b8, 0x0fac6881);
+	nv_icmd(priv, 0x00c1b9, 0x00fac688);
+	nv_icmd(priv, 0x00c401, 0x00000001);
+	nv_icmd(priv, 0x00c402, 0x00010001);
+	nv_icmd(priv, 0x00c403, 0x00000001);
+	nv_icmd(priv, 0x00c404, 0x00000001);
+	nv_icmd(priv, 0x00c40e, 0x00000020);
+	nv_icmd(priv, 0x00c500, 0x00000003);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_icmd(priv, 0x001000, 0x00000002);
+	nv_icmd(priv, 0x0006aa, 0x00000001);
+	nv_icmd(priv, 0x0006ad, 0x00000100);
+	nv_icmd(priv, 0x0006ae, 0x00000100);
+	nv_icmd(priv, 0x0006b1, 0x00000011);
+	nv_icmd(priv, 0x00078c, 0x00000008);
+	nv_icmd(priv, 0x000792, 0x00000001);
+	nv_icmd(priv, 0x000794, 0x00000001);
+	nv_icmd(priv, 0x000795, 0x00000001);
+	nv_icmd(priv, 0x000796, 0x00000001);
+	nv_icmd(priv, 0x000797, 0x000000cf);
+	nv_icmd(priv, 0x00079a, 0x00000002);
+	nv_icmd(priv, 0x000833, 0x04444480);
+	nv_icmd(priv, 0x0007a1, 0x00000001);
+	nv_icmd(priv, 0x0007a3, 0x00000001);
+	nv_icmd(priv, 0x0007a4, 0x00000001);
+	nv_icmd(priv, 0x0007a5, 0x00000001);
+	nv_icmd(priv, 0x000831, 0x00000004);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_icmd(priv, 0x001000, 0x00000008);
+	nv_icmd(priv, 0x000039, 0x00000000);
+	nv_icmd(priv, 0x00003a, 0x00000000);
+	nv_icmd(priv, 0x00003b, 0x00000000);
+	nv_icmd(priv, 0x000380, 0x00000001);
+	nv_icmd(priv, 0x000366, 0x00000000);
+	nv_icmd(priv, 0x000367, 0x00000000);
+	nv_icmd(priv, 0x000368, 0x00000fff);
+	nv_icmd(priv, 0x000370, 0x00000000);
+	nv_icmd(priv, 0x000371, 0x00000000);
+	nv_icmd(priv, 0x000372, 0x000fffff);
+	nv_icmd(priv, 0x000813, 0x00000006);
+	nv_icmd(priv, 0x000814, 0x00000008);
+	nv_icmd(priv, 0x000957, 0x00000003);
+	nv_icmd(priv, 0x000818, 0x00000000);
+	nv_icmd(priv, 0x000819, 0x00000000);
+	nv_icmd(priv, 0x00081a, 0x00000000);
+	nv_icmd(priv, 0x00081b, 0x00000000);
+	nv_icmd(priv, 0x00081c, 0x00000000);
+	nv_icmd(priv, 0x00081d, 0x00000000);
+	nv_icmd(priv, 0x00081e, 0x00000000);
+	nv_icmd(priv, 0x00081f, 0x00000000);
+	nv_icmd(priv, 0x000848, 0x00000000);
+	nv_icmd(priv, 0x000849, 0x00000000);
+	nv_icmd(priv, 0x00084a, 0x00000000);
+	nv_icmd(priv, 0x00084b, 0x00000000);
+	nv_icmd(priv, 0x00084c, 0x00000000);
+	nv_icmd(priv, 0x00084d, 0x00000000);
+	nv_icmd(priv, 0x00084e, 0x00000000);
+	nv_icmd(priv, 0x00084f, 0x00000000);
+	nv_icmd(priv, 0x000850, 0x00000000);
+	nv_icmd(priv, 0x000851, 0x00000000);
+	nv_icmd(priv, 0x000852, 0x00000000);
+	nv_icmd(priv, 0x000853, 0x00000000);
+	nv_icmd(priv, 0x000854, 0x00000000);
+	nv_icmd(priv, 0x000855, 0x00000000);
+	nv_icmd(priv, 0x000856, 0x00000000);
+	nv_icmd(priv, 0x000857, 0x00000000);
+	nv_icmd(priv, 0x000738, 0x00000000);
+	nv_icmd(priv, 0x000b07, 0x00000002);
+	nv_icmd(priv, 0x000b08, 0x00000100);
+	nv_icmd(priv, 0x000b09, 0x00000100);
+	nv_icmd(priv, 0x000b0a, 0x00000001);
+	nv_icmd(priv, 0x000a04, 0x000000ff);
+	nv_icmd(priv, 0x00097f, 0x00000100);
+	nv_icmd(priv, 0x000a02, 0x00000001);
+	nv_icmd(priv, 0x000809, 0x00000007);
+	nv_icmd(priv, 0x00c221, 0x00000040);
+	nv_icmd(priv, 0x00c401, 0x00000001);
+	nv_icmd(priv, 0x00c402, 0x00010001);
+	nv_icmd(priv, 0x00c403, 0x00000001);
+	nv_icmd(priv, 0x00c404, 0x00000001);
+	nv_icmd(priv, 0x00c40e, 0x00000020);
+	nv_icmd(priv, 0x00c500, 0x00000003);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_icmd(priv, 0x001000, 0x00000001);
+	nv_icmd(priv, 0x000b07, 0x00000002);
+	nv_icmd(priv, 0x000b08, 0x00000100);
+	nv_icmd(priv, 0x000b09, 0x00000100);
+	nv_icmd(priv, 0x000b0a, 0x00000001);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_wr32(priv, 0x400208, 0x00000000);
+}
+
+static void
+nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
+{
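+	/* Default per-method state for the 0xa097 3D class on NVE0. */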
+	nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0880, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0900, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0940, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0980, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0804, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0844, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0884, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0904, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0944, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0984, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0808, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0848, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0888, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x08c8, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0908, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0948, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0988, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x09c8, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x080c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x084c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x088c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x08cc, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x090c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x094c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x098c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x09cc, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x0810, 0x000000cf);
+	nv_mthd(priv, 0xa097, 0x0850, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0890, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0910, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0950, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0990, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0814, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0854, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0894, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x08d4, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0914, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0954, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0994, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x09d4, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0818, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0858, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0898, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x08d8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0918, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0958, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0998, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x09d8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x081c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x085c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x089c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x091c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x095c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x099c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0820, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0860, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0920, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0960, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ca0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ce0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cf0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ca4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cb4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ce4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cf4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c38, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c78, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c98, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ca8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cb8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cd8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ce8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cf8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c1c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c3c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c7c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c9c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cbc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ccc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cdc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cfc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1da0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1db0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1de0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1df0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1da4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1db4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1de4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1df4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d38, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d78, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d98, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1da8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1db8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dd8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1de8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1df8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d1c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d3c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d7c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d9c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dbc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dcc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ddc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dfc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f38, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f78, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f1c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f3c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f7c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f98, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fa0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fa8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fb8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fd8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fe0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fe8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ff0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ff8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f9c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fa4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fb4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fbc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fcc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fdc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fe4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ff4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ffc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2000, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2040, 0x00000011);
+	nv_mthd(priv, 0xa097, 0x2080, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x20c0, 0x00000030);
+	nv_mthd(priv, 0xa097, 0x2100, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x2140, 0x00000051);
+	nv_mthd(priv, 0xa097, 0x200c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x204c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x208c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x20cc, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x210c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x214c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x2010, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2050, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2090, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x20d0, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x2110, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x2150, 0x00000004);
+	nv_mthd(priv, 0xa097, 0x0380, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0384, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0388, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x038c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0700, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0710, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0720, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0730, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0704, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0714, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0724, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0734, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0708, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0718, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0728, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0738, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2800, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2804, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2808, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x280c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2810, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2814, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2818, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x281c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2820, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2824, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2828, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x282c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2830, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2834, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2838, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x283c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2840, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2844, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2848, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x284c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2850, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2854, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2858, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x285c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2860, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2864, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2868, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x286c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2870, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2874, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2878, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x287c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2880, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2884, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2888, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x288c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2890, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2894, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2898, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x289c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2900, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2904, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2908, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x290c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2910, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2914, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2918, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x291c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2920, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2924, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2928, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x292c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2930, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2934, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2938, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x293c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2940, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2944, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2948, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x294c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2950, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2954, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2958, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x295c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2960, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2964, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2968, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x296c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2970, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2974, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2978, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x297c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2980, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2984, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2988, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x298c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2990, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2994, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2998, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x299c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aa0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ac0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ae0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ba0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0be0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aa4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ac4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ae4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ba4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0be4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aa8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ac8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ae8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ba8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0be8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0acc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bcc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ab0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ad0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0af0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bf0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ab4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ad4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0af4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bb4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bf4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ca0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ce0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cf0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ca4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cb4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ce4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cf4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c38, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c78, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c98, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ca8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cb8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cd8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ce8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cf8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c0c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c1c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c2c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c3c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c4c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c5c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c6c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c7c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c8c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c9c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cac, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cbc, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0ccc, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cdc, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cec, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cfc, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0d00, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d08, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d10, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d18, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d20, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d28, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d30, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d38, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d04, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d0c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d14, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d1c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d24, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d2c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d34, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d3c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ea0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0eb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ec0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ed0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ee0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ef0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e04, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e14, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e24, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e34, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e44, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e54, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e64, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e74, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e84, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e94, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ea4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0eb4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ec4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ed4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ee4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ef4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e08, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e18, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e28, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e38, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e48, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e58, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e68, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e78, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e88, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e98, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ea8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0eb8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ec8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ed8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ee8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ef8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1e00, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e20, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e40, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e60, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e80, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ea0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ec0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ee0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e04, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e24, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e44, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e64, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e84, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ea4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ec4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ee4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e08, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e28, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e48, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e68, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e88, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ea8, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ec8, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ee8, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e0c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e2c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e4c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e6c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e8c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1eac, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ecc, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1eec, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e10, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e30, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e50, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e70, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e90, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1eb0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ed0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ef0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e14, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e34, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e54, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e74, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e94, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1eb4, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ed4, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ef4, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e18, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e38, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e58, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e78, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e98, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1eb8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ed8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ef8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x3400, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3404, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3408, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x340c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3410, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3414, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3418, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x341c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3420, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3424, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3428, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x342c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3430, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3434, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3438, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x343c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3440, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3444, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3448, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x344c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3450, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3454, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3458, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x345c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3460, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3464, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3468, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x346c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3470, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3474, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3478, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x347c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3480, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3484, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3488, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x348c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3490, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3494, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3498, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x349c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3500, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3504, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3508, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x350c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3510, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3514, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3518, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x351c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3520, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3524, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3528, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x352c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3530, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3534, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3538, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x353c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3540, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3544, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3548, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x354c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3550, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3554, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3558, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x355c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3560, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3564, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3568, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x356c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3570, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3574, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3578, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x357c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3580, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3584, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3588, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x358c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3590, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3594, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3598, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x359c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x030c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1944, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1514, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d68, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x121c, 0x0fac6881);
+	nv_mthd(priv, 0xa097, 0x0fac, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1538, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0fe0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fe4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fe8, 0x00000014);
+	nv_mthd(priv, 0xa097, 0x0fec, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0ff0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x179c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1228, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x122c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x1230, 0x00010001);
+	nv_mthd(priv, 0xa097, 0x07f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15b4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x15cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1534, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x153c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x16b4, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x0fbc, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x0fc0, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x0fc4, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x0fc8, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x0df8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dfc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1948, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1970, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x161c, 0x000009f0);
+	nv_mthd(priv, 0xa097, 0x0dcc, 0x00000010);
+	nv_mthd(priv, 0xa097, 0x163c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1160, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1164, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1168, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x116c, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1170, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1174, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1178, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x117c, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1180, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1184, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1188, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x118c, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1190, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1194, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1198, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x119c, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11a0, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11a4, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11a8, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11ac, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11b0, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11b4, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11b8, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11bc, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11c0, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11c4, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11c8, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11cc, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11d0, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11d4, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11d8, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11dc, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1880, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1884, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1888, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x188c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1890, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1894, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1898, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x189c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17d0, 0x000000ff);
+	nv_mthd(priv, 0xa097, 0x17d4, 0xffffffff);
+	nv_mthd(priv, 0xa097, 0x17d8, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x17dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1434, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1438, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dec, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x13a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1318, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1644, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0748, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0de8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1648, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1120, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1124, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1128, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x112c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1118, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x164c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1658, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1910, 0x00000290);
+	nv_mthd(priv, 0xa097, 0x1518, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x165c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1520, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1604, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1570, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x13b0, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x13b4, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x020c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1670, 0x30201000);
+	nv_mthd(priv, 0xa097, 0x1674, 0x70605040);
+	nv_mthd(priv, 0xa097, 0x1678, 0xb8a89888);
+	nv_mthd(priv, 0xa097, 0x167c, 0xf8e8d8c8);
+	nv_mthd(priv, 0xa097, 0x166c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1680, 0x00ffff00);
+	nv_mthd(priv, 0xa097, 0x12d0, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x12d4, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1684, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1688, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dac, 0x00001b02);
+	nv_mthd(priv, 0xa097, 0x0db0, 0x00001b02);
+	nv_mthd(priv, 0xa097, 0x0db4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x168c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x156c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x187c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1110, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0dc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1234, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1690, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12ac, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0790, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0794, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0798, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x079c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x077c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1000, 0x00000010);
+	nv_mthd(priv, 0xa097, 0x10fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1290, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0218, 0x00000010);
+	nv_mthd(priv, 0xa097, 0x12d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12dc, 0x00000010);
+	nv_mthd(priv, 0xa097, 0x0d94, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x155c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1560, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1564, 0x00000fff);
+	nv_mthd(priv, 0xa097, 0x1574, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1578, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x157c, 0x000fffff);
+	nv_mthd(priv, 0xa097, 0x1354, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1610, 0x00000012);
+	nv_mthd(priv, 0xa097, 0x1608, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x160c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x260c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x162c, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x0210, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0320, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0324, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0328, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x032c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0330, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0334, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0338, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0750, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0760, 0x39291909);
+	nv_mthd(priv, 0xa097, 0x0764, 0x79695949);
+	nv_mthd(priv, 0xa097, 0x0768, 0xb9a99989);
+	nv_mthd(priv, 0xa097, 0x076c, 0xf9e9d9c9);
+	nv_mthd(priv, 0xa097, 0x0770, 0x30201000);
+	nv_mthd(priv, 0xa097, 0x0774, 0x70605040);
+	nv_mthd(priv, 0xa097, 0x0778, 0x00009080);
+	nv_mthd(priv, 0xa097, 0x0780, 0x39291909);
+	nv_mthd(priv, 0xa097, 0x0784, 0x79695949);
+	nv_mthd(priv, 0xa097, 0x0788, 0xb9a99989);
+	nv_mthd(priv, 0xa097, 0x078c, 0xf9e9d9c9);
+	nv_mthd(priv, 0xa097, 0x07d0, 0x30201000);
+	nv_mthd(priv, 0xa097, 0x07d4, 0x70605040);
+	nv_mthd(priv, 0xa097, 0x07d8, 0x00009080);
+	nv_mthd(priv, 0xa097, 0x037c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0740, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0744, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2600, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1918, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x191c, 0x00000900);
+	nv_mthd(priv, 0xa097, 0x1920, 0x00000405);
+	nv_mthd(priv, 0xa097, 0x1308, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1924, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x13ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x192c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x193c, 0x00002c1c);
+	nv_mthd(priv, 0xa097, 0x0d7c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x02c0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1510, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1940, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ff4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ff8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x194c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1950, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1968, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1590, 0x0000003f);
+	nv_mthd(priv, 0xa097, 0x07e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x196c, 0x00000011);
+	nv_mthd(priv, 0xa097, 0x02e4, 0x0000b001);
+	nv_mthd(priv, 0xa097, 0x036c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0370, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x197c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fcc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x02d8, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x1980, 0x00000080);
+	nv_mthd(priv, 0xa097, 0x1504, 0x00000080);
+	nv_mthd(priv, 0xa097, 0x1984, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0300, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x13a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1310, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1314, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1380, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1384, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1388, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x138c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1390, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1394, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x139c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1398, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1594, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1598, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x159c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x15a0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x15a4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0f54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f9c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fa0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x130c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1360, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1364, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1368, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x136c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1370, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1374, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1378, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x137c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x133c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1340, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1344, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1348, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x134c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1350, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1358, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x12e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x131c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1320, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1324, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1328, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1140, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19c8, 0x00001500);
+	nv_mthd(priv, 0xa097, 0x135c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19e0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19e4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19e8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19ec, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19f0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19f4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19f8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19fc, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19cc, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x15b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a00, 0x00001111);
+	nv_mthd(priv, 0xa097, 0x1a04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a1c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d6c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d70, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x10f8, 0x00001010);
+	nv_mthd(priv, 0xa097, 0x0d80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0da0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1508, 0x80000000);
+	nv_mthd(priv, 0xa097, 0x150c, 0x40000000);
+	nv_mthd(priv, 0xa097, 0x1668, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0318, 0x00000008);
+	nv_mthd(priv, 0xa097, 0x031c, 0x00000008);
+	nv_mthd(priv, 0xa097, 0x0d9c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0374, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0378, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x07dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x074c, 0x00000055);
+	nv_mthd(priv, 0xa097, 0x1420, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x17bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17c4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1008, 0x00000008);
+	nv_mthd(priv, 0xa097, 0x100c, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x1010, 0x0000012c);
+	nv_mthd(priv, 0xa097, 0x0d60, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x075c, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x1018, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x101c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1020, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x1024, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1444, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1448, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x144c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0360, 0x20164010);
+	nv_mthd(priv, 0xa097, 0x0364, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x0368, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0de4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0204, 0x00000006);
+	nv_mthd(priv, 0xa097, 0x0208, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x02cc, 0x003fffff);
+	nv_mthd(priv, 0xa097, 0x02d0, 0x003fffff);
+	nv_mthd(priv, 0xa097, 0x1220, 0x00000005);
+	nv_mthd(priv, 0xa097, 0x0fdc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f98, 0x00400008);
+	nv_mthd(priv, 0xa097, 0x1284, 0x08000080);
+	nv_mthd(priv, 0xa097, 0x1450, 0x00400008);
+	nv_mthd(priv, 0xa097, 0x1454, 0x08000080);
+	nv_mthd(priv, 0xa097, 0x0214, 0x00000000);
+}
+
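+/* default method state for the 0x902d (2D) object class */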
+static void
+nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x3410, 0x00000000);
+}
+
+static void
+nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404010, 0x0);
+	nv_wr32(priv, 0x404014, 0x0);
+	nv_wr32(priv, 0x404018, 0x0);
+	nv_wr32(priv, 0x40401c, 0x0);
+	nv_wr32(priv, 0x404020, 0x0);
+	nv_wr32(priv, 0x404024, 0xe000);
+	nv_wr32(priv, 0x404028, 0x0);
+	nv_wr32(priv, 0x4040a8, 0x0);
+	nv_wr32(priv, 0x4040ac, 0x0);
+	nv_wr32(priv, 0x4040b0, 0x0);
+	nv_wr32(priv, 0x4040b4, 0x0);
+	nv_wr32(priv, 0x4040b8, 0x0);
+	nv_wr32(priv, 0x4040bc, 0x0);
+	nv_wr32(priv, 0x4040c0, 0x0);
+	nv_wr32(priv, 0x4040c4, 0x0);
+	nv_wr32(priv, 0x4040c8, 0xf800008f);
+	nv_wr32(priv, 0x4040d0, 0x0);
+	nv_wr32(priv, 0x4040d4, 0x0);
+	nv_wr32(priv, 0x4040d8, 0x0);
+	nv_wr32(priv, 0x4040dc, 0x0);
+	nv_wr32(priv, 0x4040e0, 0x0);
+	nv_wr32(priv, 0x4040e4, 0x0);
+	nv_wr32(priv, 0x4040e8, 0x1000);
+	nv_wr32(priv, 0x4040f8, 0x0);
+	nv_wr32(priv, 0x404130, 0x0);
+	nv_wr32(priv, 0x404134, 0x0);
+	nv_wr32(priv, 0x404138, 0x20000040);
+	nv_wr32(priv, 0x404150, 0x2e);
+	nv_wr32(priv, 0x404154, 0x400);
+	nv_wr32(priv, 0x404158, 0x200);
+	nv_wr32(priv, 0x404164, 0x55);
+	nv_wr32(priv, 0x4041a0, 0x0);
+	nv_wr32(priv, 0x4041a4, 0x0);
+	nv_wr32(priv, 0x4041a8, 0x0);
+	nv_wr32(priv, 0x4041ac, 0x0);
+	nv_wr32(priv, 0x404200, 0x0);
+	nv_wr32(priv, 0x404204, 0x0);
+	nv_wr32(priv, 0x404208, 0x0);
+	nv_wr32(priv, 0x40420c, 0x0);
+}
+
+static void
+nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404404, 0x0);
+	nv_wr32(priv, 0x404408, 0x0);
+	nv_wr32(priv, 0x40440c, 0x0);
+	nv_wr32(priv, 0x404410, 0x0);
+	nv_wr32(priv, 0x404414, 0x0);
+	nv_wr32(priv, 0x404418, 0x0);
+	nv_wr32(priv, 0x40441c, 0x0);
+	nv_wr32(priv, 0x404420, 0x0);
+	nv_wr32(priv, 0x404424, 0x0);
+	nv_wr32(priv, 0x404428, 0x0);
+	nv_wr32(priv, 0x40442c, 0x0);
+	nv_wr32(priv, 0x404430, 0x0);
+	nv_wr32(priv, 0x404434, 0x0);
+	nv_wr32(priv, 0x404438, 0x0);
+	nv_wr32(priv, 0x404460, 0x0);
+	nv_wr32(priv, 0x404464, 0x0);
+	nv_wr32(priv, 0x404468, 0xffffff);
+	nv_wr32(priv, 0x40446c, 0x0);
+	nv_wr32(priv, 0x404480, 0x1);
+	nv_wr32(priv, 0x404498, 0x1);
+}
+
+static void
+nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404604, 0x14);
+	nv_wr32(priv, 0x404608, 0x0);
+	nv_wr32(priv, 0x40460c, 0x3fff);
+	nv_wr32(priv, 0x404610, 0x100);
+	nv_wr32(priv, 0x404618, 0x0);
+	nv_wr32(priv, 0x40461c, 0x0);
+	nv_wr32(priv, 0x404620, 0x0);
+	nv_wr32(priv, 0x404624, 0x0);
+	nv_wr32(priv, 0x40462c, 0x0);
+	nv_wr32(priv, 0x404630, 0x0);
+	nv_wr32(priv, 0x404640, 0x0);
+	nv_wr32(priv, 0x404654, 0x0);
+	nv_wr32(priv, 0x404660, 0x0);
+	nv_wr32(priv, 0x404678, 0x0);
+	nv_wr32(priv, 0x40467c, 0x2);
+	nv_wr32(priv, 0x404680, 0x0);
+	nv_wr32(priv, 0x404684, 0x0);
+	nv_wr32(priv, 0x404688, 0x0);
+	nv_wr32(priv, 0x40468c, 0x0);
+	nv_wr32(priv, 0x404690, 0x0);
+	nv_wr32(priv, 0x404694, 0x0);
+	nv_wr32(priv, 0x404698, 0x0);
+	nv_wr32(priv, 0x40469c, 0x0);
+	nv_wr32(priv, 0x4046a0, 0x7f0080);
+	nv_wr32(priv, 0x4046a4, 0x0);
+	nv_wr32(priv, 0x4046a8, 0x0);
+	nv_wr32(priv, 0x4046ac, 0x0);
+	nv_wr32(priv, 0x4046b0, 0x0);
+	nv_wr32(priv, 0x4046b4, 0x0);
+	nv_wr32(priv, 0x4046b8, 0x0);
+	nv_wr32(priv, 0x4046bc, 0x0);
+	nv_wr32(priv, 0x4046c0, 0x0);
+	nv_wr32(priv, 0x4046c8, 0x0);
+	nv_wr32(priv, 0x4046cc, 0x0);
+	nv_wr32(priv, 0x4046d0, 0x0);
+}
+
+static void
+nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404700, 0x0);
+	nv_wr32(priv, 0x404704, 0x0);
+	nv_wr32(priv, 0x404708, 0x0);
+	nv_wr32(priv, 0x404718, 0x0);
+	nv_wr32(priv, 0x40471c, 0x0);
+	nv_wr32(priv, 0x404720, 0x0);
+	nv_wr32(priv, 0x404724, 0x0);
+	nv_wr32(priv, 0x404728, 0x0);
+	nv_wr32(priv, 0x40472c, 0x0);
+	nv_wr32(priv, 0x404730, 0x0);
+	nv_wr32(priv, 0x404734, 0x100);
+	nv_wr32(priv, 0x404738, 0x0);
+	nv_wr32(priv, 0x40473c, 0x0);
+	nv_wr32(priv, 0x404744, 0x0);
+	nv_wr32(priv, 0x404748, 0x0);
+	nv_wr32(priv, 0x404754, 0x0);
+}
+
+static void
+nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x405800, 0xf8000bf);
+	nv_wr32(priv, 0x405830, 0x2180648);
+	nv_wr32(priv, 0x405834, 0x8000000);
+	nv_wr32(priv, 0x405838, 0x0);
+	nv_wr32(priv, 0x405854, 0x0);
+	nv_wr32(priv, 0x405870, 0x1);
+	nv_wr32(priv, 0x405874, 0x1);
+	nv_wr32(priv, 0x405878, 0x1);
+	nv_wr32(priv, 0x40587c, 0x1);
+	nv_wr32(priv, 0x405a00, 0x0);
+	nv_wr32(priv, 0x405a04, 0x0);
+	nv_wr32(priv, 0x405a18, 0x0);
+	nv_wr32(priv, 0x405b00, 0x0);
+	nv_wr32(priv, 0x405b10, 0x1000);
+}
+
+static void
+nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x406020, 0x4103c1);
+	nv_wr32(priv, 0x406028, 0x1);
+	nv_wr32(priv, 0x40602c, 0x1);
+	nv_wr32(priv, 0x406030, 0x1);
+	nv_wr32(priv, 0x406034, 0x1);
+}
+
+static void
+nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x4064a8, 0x0);
+	nv_wr32(priv, 0x4064ac, 0x3fff);
+	nv_wr32(priv, 0x4064b4, 0x0);
+	nv_wr32(priv, 0x4064b8, 0x0);
+	nv_wr32(priv, 0x4064c0, 0x801a00f0);
+	nv_wr32(priv, 0x4064c4, 0x192ffff);
+	nv_wr32(priv, 0x4064c8, 0x1800600);
+	nv_wr32(priv, 0x4064cc, 0x0);
+	nv_wr32(priv, 0x4064d0, 0x0);
+	nv_wr32(priv, 0x4064d4, 0x0);
+	nv_wr32(priv, 0x4064d8, 0x0);
+	nv_wr32(priv, 0x4064dc, 0x0);
+	nv_wr32(priv, 0x4064e0, 0x0);
+	nv_wr32(priv, 0x4064e4, 0x0);
+	nv_wr32(priv, 0x4064e8, 0x0);
+	nv_wr32(priv, 0x4064ec, 0x0);
+	nv_wr32(priv, 0x4064fc, 0x22a);
+}
+
+static void
+nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x407040, 0x0);
+}
+
+static void
+nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x407804, 0x23);
+	nv_wr32(priv, 0x40780c, 0xa418820);
+	nv_wr32(priv, 0x407810, 0x62080e6);
+	nv_wr32(priv, 0x407814, 0x20398a4);
+	nv_wr32(priv, 0x407818, 0xe629062);
+	nv_wr32(priv, 0x40781c, 0xa418820);
+	nv_wr32(priv, 0x407820, 0xe6);
+	nv_wr32(priv, 0x4078bc, 0x103);
+}
+
+static void
+nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x408000, 0x0);
+	nv_wr32(priv, 0x408004, 0x0);
+	nv_wr32(priv, 0x408008, 0x30);
+	nv_wr32(priv, 0x40800c, 0x0);
+	nv_wr32(priv, 0x408010, 0x0);
+	nv_wr32(priv, 0x408014, 0x69);
+	nv_wr32(priv, 0x408018, 0xe100e100);
+	nv_wr32(priv, 0x408064, 0x0);
+}
+
+static void
+nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x408800, 0x2802a3c);
+	nv_wr32(priv, 0x408804, 0x40);
+	nv_wr32(priv, 0x408808, 0x1043e005);
+	nv_wr32(priv, 0x408840, 0xb);
+	nv_wr32(priv, 0x408900, 0x3080b801);
+	nv_wr32(priv, 0x408904, 0x62000001);
+	nv_wr32(priv, 0x408908, 0xc8102f);
+	nv_wr32(priv, 0x408980, 0x11d);
+}
+
+static void
+nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x418380, 0x16);
+	nv_wr32(priv, 0x418400, 0x38004e00);
+	nv_wr32(priv, 0x418404, 0x71e0ffff);
+	nv_wr32(priv, 0x41840c, 0x1008);
+	nv_wr32(priv, 0x418410, 0xfff0fff);
+	nv_wr32(priv, 0x418414, 0x2200fff);
+	nv_wr32(priv, 0x418450, 0x0);
+	nv_wr32(priv, 0x418454, 0x0);
+	nv_wr32(priv, 0x418458, 0x0);
+	nv_wr32(priv, 0x41845c, 0x0);
+	nv_wr32(priv, 0x418460, 0x0);
+	nv_wr32(priv, 0x418464, 0x0);
+	nv_wr32(priv, 0x418468, 0x1);
+	nv_wr32(priv, 0x41846c, 0x0);
+	nv_wr32(priv, 0x418470, 0x0);
+	nv_wr32(priv, 0x418600, 0x1f);
+	nv_wr32(priv, 0x418684, 0xf);
+	nv_wr32(priv, 0x418700, 0x2);
+	nv_wr32(priv, 0x418704, 0x80);
+	nv_wr32(priv, 0x418708, 0x0);
+	nv_wr32(priv, 0x41870c, 0x0);
+	nv_wr32(priv, 0x418710, 0x0);
+	nv_wr32(priv, 0x418800, 0x7006860a);
+	nv_wr32(priv, 0x418808, 0x0);
+	nv_wr32(priv, 0x41880c, 0x0);
+	nv_wr32(priv, 0x418810, 0x0);
+	nv_wr32(priv, 0x418828, 0x44);
+	nv_wr32(priv, 0x418830, 0x10000001);
+	nv_wr32(priv, 0x4188d8, 0x8);
+	nv_wr32(priv, 0x4188e0, 0x1000000);
+	nv_wr32(priv, 0x4188e8, 0x0);
+	nv_wr32(priv, 0x4188ec, 0x0);
+	nv_wr32(priv, 0x4188f0, 0x0);
+	nv_wr32(priv, 0x4188f4, 0x0);
+	nv_wr32(priv, 0x4188f8, 0x0);
+	nv_wr32(priv, 0x4188fc, 0x20100018);
+	nv_wr32(priv, 0x41891c, 0xff00ff);
+	nv_wr32(priv, 0x418924, 0x0);
+	nv_wr32(priv, 0x418928, 0xffff00);
+	nv_wr32(priv, 0x41892c, 0xff00);
+	nv_wr32(priv, 0x418a00, 0x0);
+	nv_wr32(priv, 0x418a04, 0x0);
+	nv_wr32(priv, 0x418a08, 0x0);
+	nv_wr32(priv, 0x418a0c, 0x10000);
+	nv_wr32(priv, 0x418a10, 0x0);
+	nv_wr32(priv, 0x418a14, 0x0);
+	nv_wr32(priv, 0x418a18, 0x0);
+	nv_wr32(priv, 0x418a20, 0x0);
+	nv_wr32(priv, 0x418a24, 0x0);
+	nv_wr32(priv, 0x418a28, 0x0);
+	nv_wr32(priv, 0x418a2c, 0x10000);
+	nv_wr32(priv, 0x418a30, 0x0);
+	nv_wr32(priv, 0x418a34, 0x0);
+	nv_wr32(priv, 0x418a38, 0x0);
+	nv_wr32(priv, 0x418a40, 0x0);
+	nv_wr32(priv, 0x418a44, 0x0);
+	nv_wr32(priv, 0x418a48, 0x0);
+	nv_wr32(priv, 0x418a4c, 0x10000);
+	nv_wr32(priv, 0x418a50, 0x0);
+	nv_wr32(priv, 0x418a54, 0x0);
+	nv_wr32(priv, 0x418a58, 0x0);
+	nv_wr32(priv, 0x418a60, 0x0);
+	nv_wr32(priv, 0x418a64, 0x0);
+	nv_wr32(priv, 0x418a68, 0x0);
+	nv_wr32(priv, 0x418a6c, 0x10000);
+	nv_wr32(priv, 0x418a70, 0x0);
+	nv_wr32(priv, 0x418a74, 0x0);
+	nv_wr32(priv, 0x418a78, 0x0);
+	nv_wr32(priv, 0x418a80, 0x0);
+	nv_wr32(priv, 0x418a84, 0x0);
+	nv_wr32(priv, 0x418a88, 0x0);
+	nv_wr32(priv, 0x418a8c, 0x10000);
+	nv_wr32(priv, 0x418a90, 0x0);
+	nv_wr32(priv, 0x418a94, 0x0);
+	nv_wr32(priv, 0x418a98, 0x0);
+	nv_wr32(priv, 0x418aa0, 0x0);
+	nv_wr32(priv, 0x418aa4, 0x0);
+	nv_wr32(priv, 0x418aa8, 0x0);
+	nv_wr32(priv, 0x418aac, 0x10000);
+	nv_wr32(priv, 0x418ab0, 0x0);
+	nv_wr32(priv, 0x418ab4, 0x0);
+	nv_wr32(priv, 0x418ab8, 0x0);
+	nv_wr32(priv, 0x418ac0, 0x0);
+	nv_wr32(priv, 0x418ac4, 0x0);
+	nv_wr32(priv, 0x418ac8, 0x0);
+	nv_wr32(priv, 0x418acc, 0x10000);
+	nv_wr32(priv, 0x418ad0, 0x0);
+	nv_wr32(priv, 0x418ad4, 0x0);
+	nv_wr32(priv, 0x418ad8, 0x0);
+	nv_wr32(priv, 0x418ae0, 0x0);
+	nv_wr32(priv, 0x418ae4, 0x0);
+	nv_wr32(priv, 0x418ae8, 0x0);
+	nv_wr32(priv, 0x418aec, 0x10000);
+	nv_wr32(priv, 0x418af0, 0x0);
+	nv_wr32(priv, 0x418af4, 0x0);
+	nv_wr32(priv, 0x418af8, 0x0);
+	nv_wr32(priv, 0x418b00, 0x6);
+	nv_wr32(priv, 0x418b08, 0xa418820);
+	nv_wr32(priv, 0x418b0c, 0x62080e6);
+	nv_wr32(priv, 0x418b10, 0x20398a4);
+	nv_wr32(priv, 0x418b14, 0xe629062);
+	nv_wr32(priv, 0x418b18, 0xa418820);
+	nv_wr32(priv, 0x418b1c, 0xe6);
+	nv_wr32(priv, 0x418bb8, 0x103);
+	nv_wr32(priv, 0x418c08, 0x1);
+	nv_wr32(priv, 0x418c10, 0x0);
+	nv_wr32(priv, 0x418c14, 0x0);
+	nv_wr32(priv, 0x418c18, 0x0);
+	nv_wr32(priv, 0x418c1c, 0x0);
+	nv_wr32(priv, 0x418c20, 0x0);
+	nv_wr32(priv, 0x418c24, 0x0);
+	nv_wr32(priv, 0x418c28, 0x0);
+	nv_wr32(priv, 0x418c2c, 0x0);
+	nv_wr32(priv, 0x418c40, 0xffffffff);
+	nv_wr32(priv, 0x418c6c, 0x1);
+	nv_wr32(priv, 0x418c80, 0x20200004);
+	nv_wr32(priv, 0x418c8c, 0x1);
+	nv_wr32(priv, 0x419000, 0x780);
+	nv_wr32(priv, 0x419004, 0x0);
+	nv_wr32(priv, 0x419008, 0x0);
+	nv_wr32(priv, 0x419014, 0x4);
+}
+
+static void
+nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x419848, 0x0);
+	nv_wr32(priv, 0x419864, 0x129);
+	nv_wr32(priv, 0x419888, 0x0);
+	nv_wr32(priv, 0x419a00, 0xf0);
+	nv_wr32(priv, 0x419a04, 0x1);
+	nv_wr32(priv, 0x419a08, 0x21);
+	nv_wr32(priv, 0x419a0c, 0x20000);
+	nv_wr32(priv, 0x419a10, 0x0);
+	nv_wr32(priv, 0x419a14, 0x200);
+	nv_wr32(priv, 0x419a1c, 0xc000);
+	nv_wr32(priv, 0x419a20, 0x800);
+	nv_wr32(priv, 0x419a30, 0x1);
+	nv_wr32(priv, 0x419ac4, 0x37f440);
+	nv_wr32(priv, 0x419c00, 0xa);
+	nv_wr32(priv, 0x419c04, 0x80000006);
+	nv_wr32(priv, 0x419c08, 0x2);
+	nv_wr32(priv, 0x419c20, 0x0);
+	nv_wr32(priv, 0x419c24, 0x84210);
+	nv_wr32(priv, 0x419c28, 0x3efbefbe);
+	nv_wr32(priv, 0x419ce8, 0x0);
+	nv_wr32(priv, 0x419cf4, 0x3203);
+	nv_wr32(priv, 0x419e04, 0x0);
+	nv_wr32(priv, 0x419e08, 0x0);
+	nv_wr32(priv, 0x419e0c, 0x0);
+	nv_wr32(priv, 0x419e10, 0x402);
+	nv_wr32(priv, 0x419e44, 0x13eff2);
+	nv_wr32(priv, 0x419e48, 0x0);
+	nv_wr32(priv, 0x419e4c, 0x7f);
+	nv_wr32(priv, 0x419e50, 0x0);
+	nv_wr32(priv, 0x419e54, 0x0);
+	nv_wr32(priv, 0x419e58, 0x0);
+	nv_wr32(priv, 0x419e5c, 0x0);
+	nv_wr32(priv, 0x419e60, 0x0);
+	nv_wr32(priv, 0x419e64, 0x0);
+	nv_wr32(priv, 0x419e68, 0x0);
+	nv_wr32(priv, 0x419e6c, 0x0);
+	nv_wr32(priv, 0x419e70, 0x0);
+	nv_wr32(priv, 0x419e74, 0x0);
+	nv_wr32(priv, 0x419e78, 0x0);
+	nv_wr32(priv, 0x419e7c, 0x0);
+	nv_wr32(priv, 0x419e80, 0x0);
+	nv_wr32(priv, 0x419e84, 0x0);
+	nv_wr32(priv, 0x419e88, 0x0);
+	nv_wr32(priv, 0x419e8c, 0x0);
+	nv_wr32(priv, 0x419e90, 0x0);
+	nv_wr32(priv, 0x419e94, 0x0);
+	nv_wr32(priv, 0x419e98, 0x0);
+	nv_wr32(priv, 0x419eac, 0x1fcf);
+	nv_wr32(priv, 0x419eb0, 0xd3f);
+	nv_wr32(priv, 0x419ec8, 0x1304f);
+	nv_wr32(priv, 0x419f30, 0x0);
+	nv_wr32(priv, 0x419f34, 0x0);
+	nv_wr32(priv, 0x419f38, 0x0);
+	nv_wr32(priv, 0x419f3c, 0x0);
+	nv_wr32(priv, 0x419f40, 0x0);
+	nv_wr32(priv, 0x419f44, 0x0);
+	nv_wr32(priv, 0x419f48, 0x0);
+	nv_wr32(priv, 0x419f4c, 0x0);
+	nv_wr32(priv, 0x419f58, 0x0);
+	nv_wr32(priv, 0x419f78, 0xb);
+}
+
+static void
+nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x41be24, 0x6);
+	nv_wr32(priv, 0x41bec0, 0x12180000);
+	nv_wr32(priv, 0x41bec4, 0x37f7f);
+	nv_wr32(priv, 0x41bee4, 0x6480430);
+	nv_wr32(priv, 0x41bf00, 0xa418820);
+	nv_wr32(priv, 0x41bf04, 0x62080e6);
+	nv_wr32(priv, 0x41bf08, 0x20398a4);
+	nv_wr32(priv, 0x41bf0c, 0xe629062);
+	nv_wr32(priv, 0x41bf10, 0xa418820);
+	nv_wr32(priv, 0x41bf14, 0xe6);
+	nv_wr32(priv, 0x41bfd0, 0x900103);
+	nv_wr32(priv, 0x41bfe0, 0x400001);
+	nv_wr32(priv, 0x41bfe4, 0x0);
+}
+
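+/*
+ * Build the initial graphics context image for NVE0: program the global,
+ * GPC and TPC register defaults above, describe the context buffers via
+ * mmio_data()/mmio_list(), distribute TPC IDs across the GPCs, then replay
+ * the per-class method defaults (icmd, 0xa097, 0x902d).
+ */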
+int
+nve0_grctx_generate(struct nvc0_graph_priv *priv)
+{
+	struct nvc0_grctx info;
+	int ret, i, gpc, tpc, id;
+	u32 data[6] = {}, data2[2] = {}, tmp;
+	u32 tpc_set = 0, tpc_mask = 0;
+	u32 magic[GPC_MAX][2], offset;
+	u8 tpcnr[GPC_MAX], a, b;
+	u8 shift, ntpcv;
+
+	ret = nvc0_grctx_init(priv, &info);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x400204, 0x00000000);
+	nv_wr32(priv, 0x400208, 0x00000000);
+
+	nve0_graph_generate_unk40xx(priv);
+	nve0_graph_generate_unk44xx(priv);
+	nve0_graph_generate_unk46xx(priv);
+	nve0_graph_generate_unk47xx(priv);
+	nve0_graph_generate_unk58xx(priv);
+	nve0_graph_generate_unk60xx(priv);
+	nve0_graph_generate_unk64xx(priv);
+	nve0_graph_generate_unk70xx(priv);
+	nve0_graph_generate_unk78xx(priv);
+	nve0_graph_generate_unk80xx(priv);
+	nve0_graph_generate_unk88xx(priv);
+	nve0_graph_generate_gpc(priv);
+	nve0_graph_generate_tpc(priv);
+	nve0_graph_generate_tpcunk(priv);
+
+	nv_wr32(priv, 0x404154, 0x0);
+
+	mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+	mmio_list(0x40800c, 0x00000000,  8, 1);
+	mmio_list(0x408010, 0x80000000,  0, 0);
+	mmio_list(0x419004, 0x00000000,  8, 1);
+	mmio_list(0x419008, 0x00000000,  0, 0);
+	mmio_list(0x4064cc, 0x80000000,  0, 0);
+	mmio_list(0x408004, 0x00000000,  8, 0);
+	mmio_list(0x408008, 0x80000030,  0, 0);
+	mmio_list(0x418808, 0x00000000,  8, 0);
+	mmio_list(0x41880c, 0x80000030,  0, 0);
+	mmio_list(0x4064c8, 0x01800600,  0, 0);
+	mmio_list(0x418810, 0x80000000, 12, 2);
+	mmio_list(0x419848, 0x10000000, 12, 2);
+	mmio_list(0x405830, 0x02180648,  0, 0);
+	mmio_list(0x4064c4, 0x0192ffff,  0, 0);
+	for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+		u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+		u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+		magic[gpc][0]  = 0x10000000 | (magic0 << 16) | offset;
+		magic[gpc][1]  = 0x00000000 | (magic1 << 16);
+		offset += 0x0324 * priv->tpc_nr[gpc];
+	}
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
+		mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
+		offset += 0x07ff * priv->tpc_nr[gpc];
+	}
+	mmio_list(0x17e91c, 0x06060609, 0, 0);
+	mmio_list(0x17e920, 0x00090a05, 0, 0);
+
+	nv_wr32(priv, 0x418c6c, 0x1);
+	nv_wr32(priv, 0x41980c, 0x10);
+	nv_wr32(priv, 0x41be08, 0x4);
+	nv_wr32(priv, 0x4064c0, 0x801a00f0);
+	nv_wr32(priv, 0x405800, 0xf8000bf);
+	nv_wr32(priv, 0x419c00, 0xa);
+
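+	/* assign sequential IDs to each TPC and record per-GPC TPC counts */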
+	for (tpc = 0, id = 0; tpc < 4; tpc++) {
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			if (tpc < priv->tpc_nr[gpc]) {
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
+				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
+			}
+
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+		}
+	}
+
+	tmp = 0;
+	for (i = 0; i < priv->gpc_nr; i++)
+		tmp |= priv->tpc_nr[i] << (i * 4);
+	nv_wr32(priv, 0x406028, tmp);
+	nv_wr32(priv, 0x405870, tmp);
+
+	nv_wr32(priv, 0x40602c, 0x0);
+	nv_wr32(priv, 0x405874, 0x0);
+	nv_wr32(priv, 0x406030, 0x0);
+	nv_wr32(priv, 0x405878, 0x0);
+	nv_wr32(priv, 0x406034, 0x0);
+	nv_wr32(priv, 0x40587c, 0x0);
+
+	/* calculate first set of magics */
+	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+	gpc = -1;
+	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+		do {
+			gpc = (gpc + 1) % priv->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpcnr[gpc]--;
+
+		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+	}
+
+	for (; tpc < 32; tpc++)
+		data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+	/* and the second... */
+	shift = 0;
+	ntpcv = priv->tpc_total;
+	while (!(ntpcv & (1 << 4))) {
+		ntpcv <<= 1;
+		shift++;
+	}
+
+	data2[0]  = ntpcv << 16;
+	data2[0] |= shift << 21;
+	data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+	data2[0] |= priv->tpc_total << 8;
+	data2[0] |= priv->magic_not_rop_nr;
+	for (i = 1; i < 7; i++)
+		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+	/* and write it to all the various parts of PGRAPH */
+	nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+	for (i = 0; i < 6; i++)
+		nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+
+	nv_wr32(priv, 0x41bfd0, data2[0]);
+	nv_wr32(priv, 0x41bfe4, data2[1]);
+	for (i = 0; i < 6; i++)
+		nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
+
+	nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+	for (i = 0; i < 6; i++)
+		nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+
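+	/* spread the TPCs across 32 slots, writing the accumulating enable
+	 * mask and its complement (within the full TPC mask) for each slot
+	 */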
+	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+		tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
+	for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+		a = (i * (priv->tpc_total - 1)) / 32;
+		if (a != b) {
+			b = a;
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpcnr[gpc]);
+			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+			tpc_set |= 1 << ((gpc * 8) + tpc);
+		}
+
+		nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
+		nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+	}
+
+	for (i = 0; i < 8; i++)
+		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+
+	nv_wr32(priv, 0x405b00, 0x201);
+	nv_wr32(priv, 0x408850, 0x2);
+	nv_wr32(priv, 0x408958, 0x2);
+	nv_wr32(priv, 0x419f78, 0xa);
+
+	nve0_grctx_generate_icmd(priv);
+	nve0_grctx_generate_a097(priv);
+	nve0_grctx_generate_902d(priv);
+
+	nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+	nv_wr32(priv, 0x418800, 0x7026860a); //XXX
+	nv_wr32(priv, 0x41be10, 0x00bb8bc7); //XXX
+	return nvc0_grctx_fini(&info);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
index 15272be33b66..b86cc60dcd56 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
@@ -24,7 +24,7 @@
  */
 
 /* To build:
- *    m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
+ *    m4 gpcnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o gpcnvc0.fuc.h
  */
 
 /* TODO
@@ -33,7 +33,7 @@
  */
 
 .section #nvc0_grgpc_data
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
 gpc_id:			.b32 0
 gpc_mmio_list_head:	.b32 0
 gpc_mmio_list_tail:	.b32 0
@@ -209,11 +209,11 @@ nvd9_tpc_mmio_tail:
 .section #nvc0_grgpc_code
 bra #init
 define(`include_code')
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
 
 // reports an exception to the host
 //
-// In: $r15 error code (see nvc0_graph.fuc)
+// In: $r15 error code (see nvc0.fuc)
 //
 error:
 	push $r14
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index a988b8ad00ac..96050ddb22ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -1,11 +1,19 @@
 uint32_t nvc0_grgpc_data[] = {
+/* 0x0000: gpc_id */
 	0x00000000,
+/* 0x0004: gpc_mmio_list_head */
 	0x00000000,
+/* 0x0008: gpc_mmio_list_tail */
 	0x00000000,
+/* 0x000c: tpc_count */
 	0x00000000,
+/* 0x0010: tpc_mask */
 	0x00000000,
+/* 0x0014: tpc_mmio_list_head */
 	0x00000000,
+/* 0x0018: tpc_mmio_list_tail */
 	0x00000000,
+/* 0x001c: cmd_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -24,6 +32,7 @@ uint32_t nvc0_grgpc_data[] = {
 	0x00000000,
 	0x00000000,
 	0x00000000,
+/* 0x0064: chipsets */
 	0x000000c0,
 	0x012800c8,
 	0x01e40194,
@@ -49,6 +58,7 @@ uint32_t nvc0_grgpc_data[] = {
 	0x0194012c,
 	0x025401f8,
 	0x00000000,
+/* 0x00c8: nvc0_gpc_mmio_head */
 	0x00000380,
 	0x14000400,
 	0x20000450,
@@ -73,7 +83,10 @@ uint32_t nvc0_grgpc_data[] = {
 	0x00000c8c,
 	0x08001000,
 	0x00001014,
+/* 0x0128: nvc0_gpc_mmio_tail */
 	0x00000c6c,
+/* 0x012c: nvc1_gpc_mmio_tail */
+/* 0x012c: nvd9_gpc_mmio_head */
 	0x00000380,
 	0x04000400,
 	0x0800040c,
@@ -100,6 +113,8 @@ uint32_t nvc0_grgpc_data[] = {
 	0x00000c8c,
 	0x08001000,
 	0x00001014,
+/* 0x0194: nvd9_gpc_mmio_tail */
+/* 0x0194: nvc0_tpc_mmio_head */
 	0x00000018,
 	0x0000003c,
 	0x00000048,
@@ -120,11 +135,16 @@ uint32_t nvc0_grgpc_data[] = {
 	0x4c000644,
 	0x00000698,
 	0x04000750,
+/* 0x01e4: nvc0_tpc_mmio_tail */
 	0x00000758,
 	0x000002c4,
 	0x000006e0,
+/* 0x01f0: nvcf_tpc_mmio_tail */
 	0x000004bc,
+/* 0x01f4: nvc3_tpc_mmio_tail */
 	0x00000544,
+/* 0x01f8: nvc1_tpc_mmio_tail */
+/* 0x01f8: nvd9_tpc_mmio_head */
 	0x00000018,
 	0x0000003c,
 	0x00000048,
@@ -152,12 +172,14 @@ uint32_t nvc0_grgpc_data[] = {
 
 uint32_t nvc0_grgpc_code[] = {
 	0x03060ef5,
+/* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
 	0x00f802ec,
+/* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
 	0x0880b600,
@@ -165,6 +187,7 @@ uint32_t nvc0_grgpc_code[] = {
 	0x90b6018f,
 	0x0f94f001,
 	0xf801d980,
+/* 0x0039: queue_get */
 	0x0131f400,
 	0x9800d898,
 	0x89b801d9,
@@ -176,37 +199,46 @@ uint32_t nvc0_grgpc_code[] = {
 	0x80b6019f,
 	0x0f84f001,
 	0xf400d880,
+/* 0x0066: queue_get_done */
 	0x00f80132,
+/* 0x0068: nv_rd32 */
 	0x0728b7f1,
 	0xb906b4b6,
 	0xc9f002ec,
 	0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
 	0xc800bccf,
 	0x1bf41fcc,
 	0x06a7f0fa,
 	0x010321f5,
 	0xf840bfcf,
+/* 0x008d: nv_wr32 */
 	0x28b7f100,
 	0x06b4b607,
 	0xb980bfd0,
 	0xc9f002ec,
 	0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
 	0xcf00bcd0,
 	0xccc800bc,
 	0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
 	0x87f100f8,
 	0x84b60430,
 	0x1ff9f006,
 	0xf8008fd0,
+/* 0x00bd: watchdog_clear */
 	0x3087f100,
 	0x0684b604,
 	0xf80080d0,
+/* 0x00c9: wait_donez */
 	0x3c87f100,
 	0x0684b608,
 	0x99f094bd,
 	0x0089d000,
 	0x081887f1,
 	0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
 	0x87f1008a,
 	0x84b60400,
 	0x0088cf06,
@@ -215,6 +247,7 @@ uint32_t nvc0_grgpc_code[] = {
 	0x84b6085c,
 	0xf094bd06,
 	0x89d00099,
+/* 0x0103: wait_doneo */
 	0xf100f800,
 	0xb6083c87,
 	0x94bd0684,
@@ -222,6 +255,7 @@ uint32_t nvc0_grgpc_code[] = {
 	0x87f10089,
 	0x84b60818,
 	0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
 	0x040087f1,
 	0xcf0684b6,
 	0x8aff0088,
@@ -230,6 +264,8 @@ uint32_t nvc0_grgpc_code[] = {
 	0xbd0684b6,
 	0x0099f094,
 	0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
 	0x9894bd00,
 	0x85b600e8,
 	0x0180b61a,
@@ -238,6 +274,7 @@ uint32_t nvc0_grgpc_code[] = {
 	0x04efb804,
 	0xb9eb1bf4,
 	0x00f8029f,
+/* 0x015c: mmctx_xfer */
 	0x083c87f1,
 	0xbd0684b6,
 	0x0199f094,
@@ -247,9 +284,11 @@ uint32_t nvc0_grgpc_code[] = {
 	0xf405bbfd,
 	0x8bd0090b,
 	0x0099f000,
+/* 0x0180: mmctx_base_disabled */
 	0xf405eefd,
 	0x8ed00c0b,
 	0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
 	0xb70199f0,
 	0xc8010080,
 	0xb4b600ab,
@@ -257,6 +296,8 @@ uint32_t nvc0_grgpc_code[] = {
 	0xb601aec8,
 	0xbefd11e4,
 	0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
 	0xf0008ecf,
 	0x0bf41fe4,
 	0x00ce98fa,
@@ -265,34 +306,42 @@ uint32_t nvc0_grgpc_code[] = {
 	0x04cdb804,
 	0xc8e81bf4,
 	0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
 	0x008bcf18,
 	0xb01fb4f0,
 	0x1bf410b4,
 	0x02a7f0f7,
 	0xf4c921f4,
+/* 0x01de: mmctx_stop */
 	0xabc81b0e,
 	0x10b4b600,
 	0xf00cb9f0,
 	0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
 	0x008bcf00,
 	0xf412bbc8,
+/* 0x01f6: mmctx_done */
 	0x87f1fa1b,
 	0x84b6085c,
 	0xf094bd06,
 	0x89d00199,
+/* 0x0207: strand_wait */
 	0xf900f800,
 	0x02a7f0a0,
 	0xfcc921f4,
+/* 0x0213: strand_pre */
 	0xf100f8a0,
 	0xf04afc87,
 	0x97f00283,
 	0x0089d00c,
 	0x020721f5,
+/* 0x0226: strand_post */
 	0x87f100f8,
 	0x83f04afc,
 	0x0d97f002,
 	0xf50089d0,
 	0xf8020721,
+/* 0x0239: strand_set */
 	0xfca7f100,
 	0x02a3f04f,
 	0x0500aba2,
@@ -303,6 +352,7 @@ uint32_t nvc0_grgpc_code[] = {
 	0xf000aed0,
 	0xbcd00ac7,
 	0x0721f500,
+/* 0x0263: strand_ctx_init */
 	0xf100f802,
 	0xb6083c87,
 	0x94bd0684,
@@ -325,6 +375,7 @@ uint32_t nvc0_grgpc_code[] = {
 	0x0684b608,
 	0xb70089cf,
 	0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -338,12 +389,14 @@ uint32_t nvc0_grgpc_code[] = {
 	0x94bd0684,
 	0xd00399f0,
 	0x00f80089,
+/* 0x02ec: error */
 	0xe7f1e0f9,
 	0xe3f09814,
 	0x8d21f440,
 	0x041ce0b7,
 	0xf401f7f0,
 	0xe0fc8d21,
+/* 0x0306: init */
 	0x04bd00f8,
 	0xf10004fe,
 	0xf0120017,
@@ -366,11 +419,13 @@ uint32_t nvc0_grgpc_code[] = {
 	0x27f10002,
 	0x24b60800,
 	0x0022cf06,
+/* 0x035f: init_find_chipset */
 	0xb65817f0,
 	0x13980c10,
 	0x0432b800,
 	0xb00b0bf4,
 	0x1bf40034,
+/* 0x0373: init_context */
 	0xf100f8f1,
 	0xb6080027,
 	0x22cf0624,
@@ -407,6 +462,7 @@ uint32_t nvc0_grgpc_code[] = {
 	0x0010b740,
 	0xf024bd08,
 	0x12d01f29,
+/* 0x0401: main */
 	0x0031f400,
 	0xf00028f4,
 	0x21f41cd7,
@@ -419,9 +475,11 @@ uint32_t nvc0_grgpc_code[] = {
 	0xfe051efd,
 	0x21f50018,
 	0x0ef404c3,
+/* 0x0431: main_not_ctx_xfer */
 	0x10ef94d3,
 	0xf501f5f0,
 	0xf402ec21,
+/* 0x043e: ih */
 	0x80f9c60e,
 	0xf90188fe,
 	0xf990f980,
@@ -436,30 +494,36 @@ uint32_t nvc0_grgpc_code[] = {
 	0xb0b70421,
 	0xe7f00400,
 	0x00bed001,
+/* 0x0474: ih_no_fifo */
 	0xfc400ad0,
 	0xfce0fcf0,
 	0xfcb0fcd0,
 	0xfc90fca0,
 	0x0088fe80,
 	0x32f480fc,
+/* 0x048f: hub_barrier_done */
 	0xf001f800,
 	0x0e9801f7,
 	0x04febb00,
 	0x9418e7f1,
 	0xf440e3f0,
 	0x00f88d21,
+/* 0x04a4: ctx_redswitch */
 	0x0614e7f1,
 	0xf006e4b6,
 	0xefd020f7,
 	0x08f7f000,
+/* 0x04b4: ctx_redswitch_delay */
 	0xf401f2b6,
 	0xf7f1fd1b,
 	0xefd00a20,
+/* 0x04c3: ctx_xfer */
 	0xf100f800,
 	0xb60a0417,
 	0x1fd00614,
 	0x0711f400,
 	0x04a421f5,
+/* 0x04d4: ctx_xfer_not_load */
 	0x4afc17f1,
 	0xf00213f0,
 	0x12d00c27,
@@ -489,11 +553,13 @@ uint32_t nvc0_grgpc_code[] = {
 	0x5c21f508,
 	0x0721f501,
 	0x0601f402,
+/* 0x054b: ctx_xfer_post */
 	0xf11412f4,
 	0xf04afc17,
 	0x27f00213,
 	0x0012d00d,
 	0x020721f5,
+/* 0x055c: ctx_xfer_done */
 	0x048f21f5,
 	0x000000f8,
 	0x00000000,
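The address annotations added to nvc0_grgpc_data[] above make the generated array legible against the .fuc source; in particular the #chipsets block at 0x0064 can now be read as a run of 12-byte descriptors (0x000000c0, 0x012800c8, 0x01e40194 is the nvc0 entry: chipset id, then the GPC and TPC mmio list bounds). A minimal C view of that layout, offered purely as an illustration of how the .b8/.b16 fields pack into the little-endian words; the struct and field names are mine, not part of the patch:

#include <stdint.h>

/* Illustration only: one entry of the GPC microcode's #chipsets table as it
 * appears in nvc0_grgpc_data[] above.  A zero chipset id terminates the list,
 * and the GPC init code walks it 12 bytes at a time (see init_find_chipset in
 * gpcnve0.fuc below). */
struct gpc_chipset_desc {
	uint8_t  chipset;		/* e.g. 0xc0 */
	uint8_t  pad[3];
	uint16_t gpc_mmio_list_head;	/* 0x00c8 -> nvc0_gpc_mmio_head */
	uint16_t gpc_mmio_list_tail;	/* 0x0128 -> nvc0_gpc_mmio_tail */
	uint16_t tpc_mmio_list_head;	/* 0x0194 -> nvc0_tpc_mmio_head */
	uint16_t tpc_mmio_list_tail;	/* 0x01e4 -> nvc0_tpc_mmio_tail */
};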
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
new file mode 100644
index 000000000000..7b715fda2763
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -0,0 +1,451 @@
+/* fuc microcode for nve0 PGRAPH/GPC
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ *    m4 nve0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nve0_grgpc.fuc.h
+ */
+
+/* TODO
+ * - bracket certain functions with scratch writes, useful for debugging
+ * - watchdog timer around ctx operations
+ */
+
+.section #nve0_grgpc_data
+include(`nve0.fuc')
+gpc_id:			.b32 0
+gpc_mmio_list_head:	.b32 0
+gpc_mmio_list_tail:	.b32 0
+
+tpc_count:		.b32 0
+tpc_mask:		.b32 0
+tpc_mmio_list_head:	.b32 0
+tpc_mmio_list_tail:	.b32 0
+
+cmd_queue:		queue_init
+
+// chipset descriptions
+chipsets:
+.b8  0xe4 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
+.b8  0xe7 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
+.b8  0 0 0 0
+
+// GPC mmio lists
+nve4_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c40, 1)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+mmctx_data(0x003024, 1)
+mmctx_data(0x0030c0, 2)
+mmctx_data(0x0030e4, 1)
+mmctx_data(0x003100, 6)
+mmctx_data(0x0031d0, 1)
+mmctx_data(0x0031e0, 2)
+nve4_gpc_mmio_tail:
+
+// TPC mmio lists
+nve4_tpc_mmio_head:
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x000230, 1)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 22)
+mmctx_data(0x0006ac, 2)
+mmctx_data(0x0006c8, 1)
+mmctx_data(0x000730, 8)
+mmctx_data(0x000758, 1)
+mmctx_data(0x000778, 1)
+nve4_tpc_mmio_tail:
+
+.section #nve0_grgpc_code
+bra #init
+define(`include_code')
+include(`nve0.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nve0.fuc)
+//
+error:
+	push $r14
+	mov $r14 -0x67ec 	// 0x9814
+	sethi $r14 0x400000
+	call #nv_wr32		// HUB_CTXCTL_CC_SCRATCH[5] = error code
+	add b32 $r14 0x41c
+	mov $r15 1
+	call #nv_wr32		// HUB_CTXCTL_INTR_UP_SET
+	pop $r14
+	ret
+
+// GPC fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+//   CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//   CC_SCRATCH[1]: context base
+//
+// Output:
+//   CC_SCRATCH[0]:
+//	     31:31: set to signal completion
+//   CC_SCRATCH[1]:
+//	      31:0: GPC context size
+//
+init:
+	clear b32 $r0
+	mov $sp $r0
+
+	// enable fifo access
+	mov $r1 0x1200
+	mov $r2 2
+	iowr I[$r1 + 0x000] $r2		// FIFO_ENABLE
+
+	// setup i0 handler, and route all interrupts to it
+	mov $r1 #ih
+	mov $iv0 $r1
+	mov $r1 0x400
+	iowr I[$r1 + 0x300] $r0		// INTR_DISPATCH
+
+	// enable fifo interrupt
+	mov $r2 4
+	iowr I[$r1 + 0x000] $r2		// INTR_EN_SET
+
+	// enable interrupts
+	bset $flags ie0
+
+	// figure out which GPC we are, and how many TPCs we have
+	mov $r1 0x608
+	shl b32 $r1 6
+	iord $r2 I[$r1 + 0x000]		// UNITS
+	mov $r3 1
+	and $r2 0x1f
+	shl b32 $r3 $r2
+	sub b32 $r3 1
+	st b32 D[$r0 + #tpc_count] $r2
+	st b32 D[$r0 + #tpc_mask] $r3
+	add b32 $r1 0x400
+	iord $r2 I[$r1 + 0x000]		// MYINDEX
+	st b32 D[$r0 + #gpc_id] $r2
+
+	// find context data for this chipset
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x000]		// CC_SCRATCH[0]
+	mov $r1 #chipsets - 12
+	init_find_chipset:
+		add b32 $r1 12
+		ld b32 $r3 D[$r1 + 0x00]
+		cmpu b32 $r3 $r2
+		bra e #init_context
+		cmpu b32 $r3 0
+		bra ne #init_find_chipset
+		// unknown chipset
+		ret
+
+	// initialise context base, and size tracking
+	init_context:
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x100]	// CC_SCRATCH[1], initial base
+	clear b32 $r3		// track GPC context size here
+
+	// set mmctx base addresses now so we don't have to do it later,
+	// they don't currently ever change
+	mov $r4 0x700
+	shl b32 $r4 6
+	shr b32 $r5 $r2 8
+	iowr I[$r4 + 0x000] $r5		// MMCTX_SAVE_SWBASE
+	iowr I[$r4 + 0x100] $r5		// MMCTX_LOAD_SWBASE
+
+	// calculate GPC mmio context size, store the chipset-specific
+	// mmio list pointers somewhere we can get at them later without
+	// re-parsing the chipset list
+	clear b32 $r14
+	clear b32 $r15
+	ld b16 $r14 D[$r1 + 4]
+	ld b16 $r15 D[$r1 + 6]
+	st b16 D[$r0 + #gpc_mmio_list_head] $r14
+	st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+	call #mmctx_size
+	add b32 $r2 $r15
+	add b32 $r3 $r15
+
+	// calculate per-TPC mmio context size, store the list pointers
+	ld b16 $r14 D[$r1 + 8]
+	ld b16 $r15 D[$r1 + 10]
+	st b16 D[$r0 + #tpc_mmio_list_head] $r14
+	st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+	call #mmctx_size
+	ld b32 $r14 D[$r0 + #tpc_count]
+	mulu $r14 $r15
+	add b32 $r2 $r14
+	add b32 $r3 $r14
+
+	// round up base/size to 256 byte boundary (for strand SWBASE)
+	add b32 $r4 0x1300
+	shr b32 $r3 2
+	iowr I[$r4 + 0x000] $r3		// MMCTX_LOAD_COUNT, wtf for?!?
+	shr b32 $r2 8
+	shr b32 $r3 6
+	add b32 $r2 1
+	add b32 $r3 1
+	shl b32 $r2 8
+	shl b32 $r3 8
+
+	// calculate size of strand context data
+	mov b32 $r15 $r2
+	call #strand_ctx_init
+	add b32 $r3 $r15
+
+	// save context size, and tell HUB we're done
+	mov $r1 0x800
+	shl b32 $r1 6
+	iowr I[$r1 + 0x100] $r3		// CC_SCRATCH[1]  = context size
+	add b32 $r1 0x800
+	clear b32 $r2
+	bset $r2 31
+	iowr I[$r1 + 0x000] $r2		// CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+	bset $flags $p0
+	sleep $p0
+	mov $r13 #cmd_queue
+	call #queue_get
+	bra $p1 #main
+
+	// 0x0000-0x0003 are all context transfers
+	cmpu b32 $r14 0x04
+	bra nc #main_not_ctx_xfer
+		// fetch $flags and mask off $p1/$p2
+		mov $r1 $flags
+		mov $r2 0x0006
+		not b32 $r2
+		and $r1 $r2
+		// set $p1/$p2 according to transfer type
+		shl b32 $r14 1
+		or $r1 $r14
+		mov $flags $r1
+		// transfer context data
+		call #ctx_xfer
+		bra #main
+
+	main_not_ctx_xfer:
+	shl b32 $r15 $r14 16
+	or $r15 E_BAD_COMMAND
+	call #error
+	bra #main
+
+// interrupt handler
+ih:
+	push $r8
+	mov $r8 $flags
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r13
+	push $r14
+	push $r15
+
+	// incoming fifo command?
+	iord $r10 I[$r0 + 0x200]	// INTR
+	and $r11 $r10 0x00000004
+	bra e #ih_no_fifo
+		// queue incoming fifo command for later processing
+		mov $r11 0x1900
+		mov $r13 #cmd_queue
+		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
+		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
+		call #queue_put
+		add b32 $r11 0x400
+		mov $r14 1
+		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+
+	// ack, and wake up main()
+	ih_no_fifo:
+	iowr I[$r0 + 0x100] $r10	// INTR_ACK
+
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	mov $flags $r8
+	pop $r8
+	bclr $flags $p0
+	iret
+
+// Set this GPC's bit in HUB_BAR, used to signal completion of various
+// activities to the HUB fuc
+//
+hub_barrier_done:
+	mov $r15 1
+	ld b32 $r14 D[$r0 + #gpc_id]
+	shl b32 $r15 $r14
+	mov $r14 -0x6be8 	// 0x409418 - HUB_BAR_SET
+	sethi $r14 0x400000
+	call #nv_wr32
+	ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off?  Anyways, without this,
+// funny things happen.
+//
+ctx_redswitch:
+	mov $r14 0x614
+	shl b32 $r14 6
+	mov $r15 0x020
+	iowr I[$r14] $r15	// GPC_RED_SWITCH = POWER
+	mov $r15 8
+	ctx_redswitch_delay:
+		sub b32 $r15 1
+		bra ne #ctx_redswitch_delay
+	mov $r15 0xa20
+	iowr I[$r14] $r15	// GPC_RED_SWITCH = UNK11, ENABLE, POWER
+	ret
+
+// Transfer GPC context data between GPU and storage area
+//
+// In: $r15 context base address
+//     $p1 clear on save, set on load
+//     $p2 set if opposite direction done/will be done, so:
+//		on save it means: "a load will follow this save"
+//		on load it means: "a save preceded this load"
+//
+ctx_xfer:
+	// set context base address
+	mov $r1 0xa04
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r15// MEM_BASE
+	bra not $p1 #ctx_xfer_not_load
+		call #ctx_redswitch
+	ctx_xfer_not_load:
+
+	// strands
+	mov $r1 0x4afc
+	sethi $r1 0x20000
+	mov $r2 0xc
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
+	call #strand_wait
+	mov $r2 0x47fc
+	sethi $r2 0x20000
+	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
+	xbit $r2 $flags $p1
+	add b32 $r2 3
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+	// mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 2		// first
+	mov $r11 0x0000
+	sethi $r11 0x500000
+	ld b32 $r12 D[$r0 + #gpc_id]
+	shl b32 $r12 15
+	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn
+	ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+	ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
+	mov $r14 0		// not multi
+	call #mmctx_xfer
+
+	// per-TPC mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 4		// last
+	mov $r11 0x4000
+	sethi $r11 0x500000	// base = NV_PGRAPH_GPC0_TPC0
+	ld b32 $r12 D[$r0 + #gpc_id]
+	shl b32 $r12 15
+	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn_TPC0
+	ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+	ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+	ld b32 $r15 D[$r0 + #tpc_mask]
+	mov $r14 0x800		// stride = 0x800
+	call #mmctx_xfer
+
+	// wait for strands to finish
+	call #strand_wait
+
+	// if load, or a save without a load following, do some
+	// unknown stuff that's done after finishing a block of
+	// strand commands
+	bra $p1 #ctx_xfer_post
+	bra not $p2 #ctx_xfer_done
+	ctx_xfer_post:
+		mov $r1 0x4afc
+		sethi $r1 0x20000
+		mov $r2 0xd
+		iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0d
+		call #strand_wait
+
+	// mark completion in HUB's barrier
+	ctx_xfer_done:
+	call #hub_barrier_done
+	ret
+
+.align 256
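Each mmctx_data() entry above assembles to a single word in the generated header that follows. Comparing the two files suggests the macro packs the register address into the low bits and the entry count, minus one, into bits 31:26 (for example mmctx_data(0x000400, 2) becomes 0x04000400 and mmctx_data(0x000644, 22) becomes 0x54000644). A small sketch under that inference; the macro itself lives in the shared fuc include, which is not part of this hunk:

#include <stdint.h>

/* Sketch only: packing inferred by comparing gpcnve0.fuc with the assembled
 * words in gpcnve0.fuc.h below; check the shared fuc include before relying
 * on it. */
static inline uint32_t mmctx_data(uint32_t addr, uint32_t count)
{
	return ((count - 1) << 26) | addr;	/* (0x000400, 2) -> 0x04000400 */
}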
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
new file mode 100644
index 000000000000..26c2165bad0f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -0,0 +1,530 @@
+uint32_t nve0_grgpc_data[] = {
+/* 0x0000: gpc_id */
+	0x00000000,
+/* 0x0004: gpc_mmio_list_head */
+	0x00000000,
+/* 0x0008: gpc_mmio_list_tail */
+	0x00000000,
+/* 0x000c: tpc_count */
+	0x00000000,
+/* 0x0010: tpc_mask */
+	0x00000000,
+/* 0x0014: tpc_mmio_list_head */
+	0x00000000,
+/* 0x0018: tpc_mmio_list_tail */
+	0x00000000,
+/* 0x001c: cmd_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0064: chipsets */
+	0x000000e4,
+	0x01040080,
+	0x014c0104,
+	0x000000e7,
+	0x01040080,
+	0x014c0104,
+	0x00000000,
+/* 0x0080: nve4_gpc_mmio_head */
+	0x00000380,
+	0x04000400,
+	0x0800040c,
+	0x20000450,
+	0x00000600,
+	0x00000684,
+	0x10000700,
+	0x00000800,
+	0x08000808,
+	0x00000828,
+	0x00000830,
+	0x000008d8,
+	0x000008e0,
+	0x140008e8,
+	0x0000091c,
+	0x08000924,
+	0x00000b00,
+	0x14000b08,
+	0x00000bb8,
+	0x00000c08,
+	0x1c000c10,
+	0x00000c40,
+	0x00000c6c,
+	0x00000c80,
+	0x00000c8c,
+	0x08001000,
+	0x00001014,
+	0x00003024,
+	0x040030c0,
+	0x000030e4,
+	0x14003100,
+	0x000031d0,
+	0x040031e0,
+/* 0x0104: nve4_gpc_mmio_tail */
+/* 0x0104: nve4_tpc_mmio_head */
+	0x00000048,
+	0x00000064,
+	0x00000088,
+	0x14000200,
+	0x0400021c,
+	0x00000230,
+	0x000002c4,
+	0x08000400,
+	0x08000420,
+	0x000004e8,
+	0x000004f4,
+	0x0c000604,
+	0x54000644,
+	0x040006ac,
+	0x000006c8,
+	0x1c000730,
+	0x00000758,
+	0x00000778,
+};
+
+uint32_t nve0_grgpc_code[] = {
+	0x03060ef5,
+/* 0x0004: queue_put */
+	0x9800d898,
+	0x86f001d9,
+	0x0489b808,
+	0xf00c1bf4,
+	0x21f502f7,
+	0x00f802ec,
+/* 0x001c: queue_put_next */
+	0xb60798c4,
+	0x8dbb0384,
+	0x0880b600,
+	0x80008e80,
+	0x90b6018f,
+	0x0f94f001,
+	0xf801d980,
+/* 0x0039: queue_get */
+	0x0131f400,
+	0x9800d898,
+	0x89b801d9,
+	0x210bf404,
+	0xb60789c4,
+	0x9dbb0394,
+	0x0890b600,
+	0x98009e98,
+	0x80b6019f,
+	0x0f84f001,
+	0xf400d880,
+/* 0x0066: queue_get_done */
+	0x00f80132,
+/* 0x0068: nv_rd32 */
+	0x0728b7f1,
+	0xb906b4b6,
+	0xc9f002ec,
+	0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
+	0xc800bccf,
+	0x1bf41fcc,
+	0x06a7f0fa,
+	0x010321f5,
+	0xf840bfcf,
+/* 0x008d: nv_wr32 */
+	0x28b7f100,
+	0x06b4b607,
+	0xb980bfd0,
+	0xc9f002ec,
+	0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
+	0xcf00bcd0,
+	0xccc800bc,
+	0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
+	0x87f100f8,
+	0x84b60430,
+	0x1ff9f006,
+	0xf8008fd0,
+/* 0x00bd: watchdog_clear */
+	0x3087f100,
+	0x0684b604,
+	0xf80080d0,
+/* 0x00c9: wait_donez */
+	0x3c87f100,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d000,
+	0x081887f1,
+	0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
+	0x87f1008a,
+	0x84b60400,
+	0x0088cf06,
+	0xf4888aff,
+	0x87f1f31b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00099,
+/* 0x0103: wait_doneo */
+	0xf100f800,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00099f0,
+	0x87f10089,
+	0x84b60818,
+	0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
+	0x040087f1,
+	0xcf0684b6,
+	0x8aff0088,
+	0xf30bf488,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0099f094,
+	0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
+	0x9894bd00,
+	0x85b600e8,
+	0x0180b61a,
+	0xbb0284b6,
+	0xe0b60098,
+	0x04efb804,
+	0xb9eb1bf4,
+	0x00f8029f,
+/* 0x015c: mmctx_xfer */
+	0x083c87f1,
+	0xbd0684b6,
+	0x0199f094,
+	0xf10089d0,
+	0xb6071087,
+	0x94bd0684,
+	0xf405bbfd,
+	0x8bd0090b,
+	0x0099f000,
+/* 0x0180: mmctx_base_disabled */
+	0xf405eefd,
+	0x8ed00c0b,
+	0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
+	0xb70199f0,
+	0xc8010080,
+	0xb4b600ab,
+	0x0cb9f010,
+	0xb601aec8,
+	0xbefd11e4,
+	0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
+	0xf0008ecf,
+	0x0bf41fe4,
+	0x00ce98fa,
+	0xd005e9fd,
+	0xc0b6c08e,
+	0x04cdb804,
+	0xc8e81bf4,
+	0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
+	0x008bcf18,
+	0xb01fb4f0,
+	0x1bf410b4,
+	0x02a7f0f7,
+	0xf4c921f4,
+/* 0x01de: mmctx_stop */
+	0xabc81b0e,
+	0x10b4b600,
+	0xf00cb9f0,
+	0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
+	0x008bcf00,
+	0xf412bbc8,
+/* 0x01f6: mmctx_done */
+	0x87f1fa1b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00199,
+/* 0x0207: strand_wait */
+	0xf900f800,
+	0x02a7f0a0,
+	0xfcc921f4,
+/* 0x0213: strand_pre */
+	0xf100f8a0,
+	0xf04afc87,
+	0x97f00283,
+	0x0089d00c,
+	0x020721f5,
+/* 0x0226: strand_post */
+	0x87f100f8,
+	0x83f04afc,
+	0x0d97f002,
+	0xf50089d0,
+	0xf8020721,
+/* 0x0239: strand_set */
+	0xfca7f100,
+	0x02a3f04f,
+	0x0500aba2,
+	0xd00fc7f0,
+	0xc7f000ac,
+	0x00bcd00b,
+	0x020721f5,
+	0xf000aed0,
+	0xbcd00ac7,
+	0x0721f500,
+/* 0x0263: strand_ctx_init */
+	0xf100f802,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x21f50089,
+	0xe7f00213,
+	0x3921f503,
+	0xfca7f102,
+	0x02a3f046,
+	0x0400aba0,
+	0xf040a0d0,
+	0xbcd001c7,
+	0x0721f500,
+	0x010c9202,
+	0xf000acd0,
+	0xbcd002c7,
+	0x0721f500,
+	0x2621f502,
+	0x8087f102,
+	0x0684b608,
+	0xb70089cf,
+	0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
+	0x8ed008fe,
+	0x408ed000,
+	0xb6808acf,
+	0xa0b606a5,
+	0x00eabb01,
+	0xb60480b6,
+	0x1bf40192,
+	0x08e4b6e8,
+	0xf1f2efbc,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x00f80089,
+/* 0x02ec: error */
+	0xe7f1e0f9,
+	0xe3f09814,
+	0x8d21f440,
+	0x041ce0b7,
+	0xf401f7f0,
+	0xe0fc8d21,
+/* 0x0306: init */
+	0x04bd00f8,
+	0xf10004fe,
+	0xf0120017,
+	0x12d00227,
+	0x3e17f100,
+	0x0010fe04,
+	0x040017f1,
+	0xf0c010d0,
+	0x12d00427,
+	0x1031f400,
+	0x060817f1,
+	0xcf0614b6,
+	0x37f00012,
+	0x1f24f001,
+	0xb60432bb,
+	0x02800132,
+	0x04038003,
+	0x040010b7,
+	0x800012cf,
+	0x27f10002,
+	0x24b60800,
+	0x0022cf06,
+/* 0x035f: init_find_chipset */
+	0xb65817f0,
+	0x13980c10,
+	0x0432b800,
+	0xb00b0bf4,
+	0x1bf40034,
+/* 0x0373: init_context */
+	0xf100f8f1,
+	0xb6080027,
+	0x22cf0624,
+	0xf134bd40,
+	0xb6070047,
+	0x25950644,
+	0x0045d008,
+	0xbd4045d0,
+	0x58f4bde4,
+	0x1f58021e,
+	0x020e4003,
+	0xf5040f40,
+	0xbb013d21,
+	0x3fbb002f,
+	0x041e5800,
+	0x40051f58,
+	0x0f400a0e,
+	0x3d21f50c,
+	0x030e9801,
+	0xbb00effd,
+	0x3ebb002e,
+	0x0040b700,
+	0x0235b613,
+	0xb60043d0,
+	0x35b60825,
+	0x0120b606,
+	0xb60130b6,
+	0x34b60824,
+	0x022fb908,
+	0x026321f5,
+	0xf1003fbb,
+	0xb6080017,
+	0x13d00614,
+	0x0010b740,
+	0xf024bd08,
+	0x12d01f29,
+/* 0x0401: main */
+	0x0031f400,
+	0xf00028f4,
+	0x21f41cd7,
+	0xf401f439,
+	0xf404e4b0,
+	0x81fe1e18,
+	0x0627f001,
+	0x12fd20bd,
+	0x01e4b604,
+	0xfe051efd,
+	0x21f50018,
+	0x0ef404c3,
+/* 0x0431: main_not_ctx_xfer */
+	0x10ef94d3,
+	0xf501f5f0,
+	0xf402ec21,
+/* 0x043e: ih */
+	0x80f9c60e,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0x800acff0,
+	0xf404abc4,
+	0xb7f11d0b,
+	0xd7f01900,
+	0x40becf1c,
+	0xf400bfcf,
+	0xb0b70421,
+	0xe7f00400,
+	0x00bed001,
+/* 0x0474: ih_no_fifo */
+	0xfc400ad0,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x048f: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb00,
+	0x9418e7f1,
+	0xf440e3f0,
+	0x00f88d21,
+/* 0x04a4: ctx_redswitch */
+	0x0614e7f1,
+	0xf006e4b6,
+	0xefd020f7,
+	0x08f7f000,
+/* 0x04b4: ctx_redswitch_delay */
+	0xf401f2b6,
+	0xf7f1fd1b,
+	0xefd00a20,
+/* 0x04c3: ctx_xfer */
+	0xf100f800,
+	0xb60a0417,
+	0x1fd00614,
+	0x0711f400,
+	0x04a421f5,
+/* 0x04d4: ctx_xfer_not_load */
+	0x4afc17f1,
+	0xf00213f0,
+	0x12d00c27,
+	0x0721f500,
+	0xfc27f102,
+	0x0223f047,
+	0xf00020d0,
+	0x20b6012c,
+	0x0012d003,
+	0xf001acf0,
+	0xb7f002a5,
+	0x50b3f000,
+	0xb6000c98,
+	0xbcbb0fc4,
+	0x010c9800,
+	0xf0020d98,
+	0x21f500e7,
+	0xacf0015c,
+	0x04a5f001,
+	0x4000b7f1,
+	0x9850b3f0,
+	0xc4b6000c,
+	0x00bcbb0f,
+	0x98050c98,
+	0x0f98060d,
+	0x00e7f104,
+	0x5c21f508,
+	0x0721f501,
+	0x0601f402,
+/* 0x054b: ctx_xfer_post */
+	0xf11412f4,
+	0xf04afc17,
+	0x27f00213,
+	0x0012d00d,
+	0x020721f5,
+/* 0x055c: ctx_xfer_done */
+	0x048f21f5,
+	0x000000f8,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
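Both this GPC ctx_xfer and the HUB variant later in the patch hand #mmctx_xfer a small request word in $r10: the transfer direction taken from $p1, plus markers for the first and last mmio list of the transfer (the GPC code uses "or $r10 2" and "or $r10 4", the HUB "or $r10 6"). A hedged summary of that word; the macro names are mine, chosen only to label the bits the inline comments call "direction", "first" and "last":

#include <stdint.h>

/* Sketch only: the flags word built in $r10 before each #mmctx_xfer call,
 * going by the inline comments in the .fuc sources in this patch. */
#define MMCTX_DIR_LOAD	(1u << 0)	/* from xbit $r10 $flags $p1: set on load   */
#define MMCTX_FIRST	(1u << 1)	/* first mmio list of this context transfer */
#define MMCTX_LAST	(1u << 2)	/* last mmio list of this context transfer  */

static inline uint32_t mmctx_request(int load, int first, int last)
{
	return (load  ? MMCTX_DIR_LOAD : 0) |
	       (first ? MMCTX_FIRST    : 0) |
	       (last  ? MMCTX_LAST     : 0);
}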
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index 98acddb2c5bb..acfc457654bd 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -24,11 +24,11 @@
  */
 
 /* To build:
- *    m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
+ *    m4 hubnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o hubnvc0.fuc.h
  */
 
 .section #nvc0_grhub_data
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
 gpc_count:		.b32 0
 rop_count:		.b32 0
 cmd_queue:		queue_init
@@ -161,11 +161,11 @@ xfer_data: 		.b32 0
 .section #nvc0_grhub_code
 bra #init
 define(`include_code')
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
 
 // reports an exception to the host
 //
-// In: $r15 error code (see nvc0_graph.fuc)
+// In: $r15 error code (see nvc0.fuc)
 //
 error:
 	push $r14
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index c5ed307abeb9..85a8d556f484 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -1,6 +1,9 @@
 uint32_t nvc0_grhub_data[] = {
+/* 0x0000: gpc_count */
 	0x00000000,
+/* 0x0004: rop_count */
 	0x00000000,
+/* 0x0008: cmd_queue */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -19,9 +22,13 @@ uint32_t nvc0_grhub_data[] = {
 	0x00000000,
 	0x00000000,
 	0x00000000,
+/* 0x0050: hub_mmio_list_head */
 	0x00000000,
+/* 0x0054: hub_mmio_list_tail */
 	0x00000000,
+/* 0x0058: ctx_current */
 	0x00000000,
+/* 0x005c: chipsets */
 	0x000000c0,
 	0x013c00a0,
 	0x000000c1,
@@ -39,6 +46,7 @@ uint32_t nvc0_grhub_data[] = {
 	0x000000d9,
 	0x01dc0140,
 	0x00000000,
+/* 0x00a0: nvc0_hub_mmio_head */
 	0x0417e91c,
 	0x04400204,
 	0x28404004,
@@ -78,7 +86,10 @@ uint32_t nvc0_grhub_data[] = {
 	0x08408800,
 	0x0c408900,
 	0x00408980,
+/* 0x013c: nvc0_hub_mmio_tail */
 	0x044064c0,
+/* 0x0140: nvc1_hub_mmio_tail */
+/* 0x0140: nvd9_hub_mmio_head */
 	0x0417e91c,
 	0x04400204,
 	0x24404004,
@@ -118,6 +129,7 @@ uint32_t nvc0_grhub_data[] = {
 	0x08408800,
 	0x0c408900,
 	0x00408980,
+/* 0x01dc: nvd9_hub_mmio_tail */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -127,7 +139,10 @@ uint32_t nvc0_grhub_data[] = {
 	0x00000000,
 	0x00000000,
 	0x00000000,
+/* 0x0200: chan_data */
+/* 0x0200: chan_mmio_count */
 	0x00000000,
+/* 0x0204: chan_mmio_address */
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -191,17 +206,20 @@ uint32_t nvc0_grhub_data[] = {
 	0x00000000,
 	0x00000000,
 	0x00000000,
+/* 0x0300: xfer_data */
 	0x00000000,
 };
 
 uint32_t nvc0_grhub_code[] = {
 	0x03090ef5,
+/* 0x0004: queue_put */
 	0x9800d898,
 	0x86f001d9,
 	0x0489b808,
 	0xf00c1bf4,
 	0x21f502f7,
 	0x00f802ec,
+/* 0x001c: queue_put_next */
 	0xb60798c4,
 	0x8dbb0384,
 	0x0880b600,
@@ -209,6 +227,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x90b6018f,
 	0x0f94f001,
 	0xf801d980,
+/* 0x0039: queue_get */
 	0x0131f400,
 	0x9800d898,
 	0x89b801d9,
@@ -220,37 +239,46 @@ uint32_t nvc0_grhub_code[] = {
 	0x80b6019f,
 	0x0f84f001,
 	0xf400d880,
+/* 0x0066: queue_get_done */
 	0x00f80132,
+/* 0x0068: nv_rd32 */
 	0x0728b7f1,
 	0xb906b4b6,
 	0xc9f002ec,
 	0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
 	0xc800bccf,
 	0x1bf41fcc,
 	0x06a7f0fa,
 	0x010321f5,
 	0xf840bfcf,
+/* 0x008d: nv_wr32 */
 	0x28b7f100,
 	0x06b4b607,
 	0xb980bfd0,
 	0xc9f002ec,
 	0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
 	0xcf00bcd0,
 	0xccc800bc,
 	0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
 	0x87f100f8,
 	0x84b60430,
 	0x1ff9f006,
 	0xf8008fd0,
+/* 0x00bd: watchdog_clear */
 	0x3087f100,
 	0x0684b604,
 	0xf80080d0,
+/* 0x00c9: wait_donez */
 	0x3c87f100,
 	0x0684b608,
 	0x99f094bd,
 	0x0089d000,
 	0x081887f1,
 	0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
 	0x87f1008a,
 	0x84b60400,
 	0x0088cf06,
@@ -259,6 +287,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x84b6085c,
 	0xf094bd06,
 	0x89d00099,
+/* 0x0103: wait_doneo */
 	0xf100f800,
 	0xb6083c87,
 	0x94bd0684,
@@ -266,6 +295,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x87f10089,
 	0x84b60818,
 	0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
 	0x040087f1,
 	0xcf0684b6,
 	0x8aff0088,
@@ -274,6 +304,8 @@ uint32_t nvc0_grhub_code[] = {
 	0xbd0684b6,
 	0x0099f094,
 	0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
 	0x9894bd00,
 	0x85b600e8,
 	0x0180b61a,
@@ -282,6 +314,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x04efb804,
 	0xb9eb1bf4,
 	0x00f8029f,
+/* 0x015c: mmctx_xfer */
 	0x083c87f1,
 	0xbd0684b6,
 	0x0199f094,
@@ -291,9 +324,11 @@ uint32_t nvc0_grhub_code[] = {
 	0xf405bbfd,
 	0x8bd0090b,
 	0x0099f000,
+/* 0x0180: mmctx_base_disabled */
 	0xf405eefd,
 	0x8ed00c0b,
 	0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
 	0xb70199f0,
 	0xc8010080,
 	0xb4b600ab,
@@ -301,6 +336,8 @@ uint32_t nvc0_grhub_code[] = {
 	0xb601aec8,
 	0xbefd11e4,
 	0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
 	0xf0008ecf,
 	0x0bf41fe4,
 	0x00ce98fa,
@@ -309,34 +346,42 @@ uint32_t nvc0_grhub_code[] = {
 	0x04cdb804,
 	0xc8e81bf4,
 	0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
 	0x008bcf18,
 	0xb01fb4f0,
 	0x1bf410b4,
 	0x02a7f0f7,
 	0xf4c921f4,
+/* 0x01de: mmctx_stop */
 	0xabc81b0e,
 	0x10b4b600,
 	0xf00cb9f0,
 	0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
 	0x008bcf00,
 	0xf412bbc8,
+/* 0x01f6: mmctx_done */
 	0x87f1fa1b,
 	0x84b6085c,
 	0xf094bd06,
 	0x89d00199,
+/* 0x0207: strand_wait */
 	0xf900f800,
 	0x02a7f0a0,
 	0xfcc921f4,
+/* 0x0213: strand_pre */
 	0xf100f8a0,
 	0xf04afc87,
 	0x97f00283,
 	0x0089d00c,
 	0x020721f5,
+/* 0x0226: strand_post */
 	0x87f100f8,
 	0x83f04afc,
 	0x0d97f002,
 	0xf50089d0,
 	0xf8020721,
+/* 0x0239: strand_set */
 	0xfca7f100,
 	0x02a3f04f,
 	0x0500aba2,
@@ -347,6 +392,7 @@ uint32_t nvc0_grhub_code[] = {
 	0xf000aed0,
 	0xbcd00ac7,
 	0x0721f500,
+/* 0x0263: strand_ctx_init */
 	0xf100f802,
 	0xb6083c87,
 	0x94bd0684,
@@ -369,6 +415,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x0684b608,
 	0xb70089cf,
 	0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
 	0x8ed008fe,
 	0x408ed000,
 	0xb6808acf,
@@ -382,6 +429,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x94bd0684,
 	0xd00399f0,
 	0x00f80089,
+/* 0x02ec: error */
 	0xe7f1e0f9,
 	0xe4b60814,
 	0x00efd006,
@@ -389,6 +437,7 @@ uint32_t nvc0_grhub_code[] = {
 	0xf006e4b6,
 	0xefd001f7,
 	0xf8e0fc00,
+/* 0x0309: init */
 	0xfe04bd00,
 	0x07fe0004,
 	0x0017f100,
@@ -429,11 +478,13 @@ uint32_t nvc0_grhub_code[] = {
 	0x080027f1,
 	0xcf0624b6,
 	0xf7f00022,
+/* 0x03a9: init_find_chipset */
 	0x08f0b654,
 	0xb800f398,
 	0x0bf40432,
 	0x0034b00b,
 	0xf8f11bf4,
+/* 0x03bd: init_context */
 	0x0017f100,
 	0x02fe5801,
 	0xf003ff58,
@@ -454,6 +505,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x001fbb02,
 	0xf1000398,
 	0xf0200047,
+/* 0x040e: init_gpc */
 	0x4ea05043,
 	0x1fb90804,
 	0x8d21f402,
@@ -467,6 +519,7 @@ uint32_t nvc0_grhub_code[] = {
 	0xf7f00100,
 	0x8d21f402,
 	0x08004ea0,
+/* 0x0440: init_gpc_wait */
 	0xc86821f4,
 	0x0bf41fff,
 	0x044ea0fa,
@@ -479,6 +532,7 @@ uint32_t nvc0_grhub_code[] = {
 	0xb74021d0,
 	0xbd080020,
 	0x1f19f014,
+/* 0x0473: main */
 	0xf40021d0,
 	0x28f40031,
 	0x08d7f000,
@@ -517,6 +571,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x94bd0684,
 	0xd00699f0,
 	0x0ef40089,
+/* 0x0509: chsw_prev_no_next */
 	0xb920f931,
 	0x32f40212,
 	0x0232f401,
@@ -524,10 +579,12 @@ uint32_t nvc0_grhub_code[] = {
 	0x17f120fc,
 	0x14b60b00,
 	0x0012d006,
+/* 0x0527: chsw_no_prev */
 	0xc8130ef4,
 	0x0bf41f23,
 	0x0131f40d,
 	0xf50232f4,
+/* 0x0537: chsw_done */
 	0xf1082921,
 	0xb60b0c17,
 	0x27f00614,
@@ -536,10 +593,12 @@ uint32_t nvc0_grhub_code[] = {
 	0xbd0684b6,
 	0x0499f094,
 	0xf50089d0,
+/* 0x0557: main_not_ctx_switch */
 	0xb0ff200e,
 	0x1bf401e4,
 	0x02f2b90d,
 	0x07b521f5,
+/* 0x0567: main_not_ctx_chan */
 	0xb0420ef4,
 	0x1bf402e4,
 	0x3c87f12e,
@@ -553,14 +612,17 @@ uint32_t nvc0_grhub_code[] = {
 	0xf094bd06,
 	0x89d00799,
 	0x110ef400,
+/* 0x0598: main_not_ctx_save */
 	0xf010ef94,
 	0x21f501f5,
 	0x0ef502ec,
+/* 0x05a6: main_done */
 	0x17f1fed1,
 	0x14b60820,
 	0xf024bd06,
 	0x12d01f29,
 	0xbe0ef500,
+/* 0x05b9: ih */
 	0xfe80f9fe,
 	0x80f90188,
 	0xa0f990f9,
@@ -574,16 +636,19 @@ uint32_t nvc0_grhub_code[] = {
 	0x21f400bf,
 	0x00b0b704,
 	0x01e7f004,
+/* 0x05ef: ih_no_fifo */
 	0xe400bed0,
 	0xf40100ab,
 	0xd7f00d0b,
 	0x01e7f108,
 	0x0421f440,
+/* 0x0600: ih_no_ctxsw */
 	0x0104b7f1,
 	0xabffb0bd,
 	0x0d0bf4b4,
 	0x0c1ca7f1,
 	0xd006a4b6,
+/* 0x0616: ih_no_other */
 	0x0ad000ab,
 	0xfcf0fc40,
 	0xfcd0fce0,
@@ -591,32 +656,40 @@ uint32_t nvc0_grhub_code[] = {
 	0xfe80fc90,
 	0x80fc0088,
 	0xf80032f4,
+/* 0x0631: ctx_4160s */
 	0x60e7f101,
 	0x40e3f041,
 	0xf401f7f0,
+/* 0x063e: ctx_4160s_wait */
 	0x21f48d21,
 	0x04ffc868,
 	0xf8fa0bf4,
+/* 0x0649: ctx_4160c */
 	0x60e7f100,
 	0x40e3f041,
 	0x21f4f4bd,
+/* 0x0657: ctx_4170s */
 	0xf100f88d,
 	0xf04170e7,
 	0xf5f040e3,
 	0x8d21f410,
+/* 0x0666: ctx_4170w */
 	0xe7f100f8,
 	0xe3f04170,
 	0x6821f440,
 	0xf410f4f0,
 	0x00f8f31b,
+/* 0x0678: ctx_redswitch */
 	0x0614e7f1,
 	0xf106e4b6,
 	0xd00270f7,
 	0xf7f000ef,
+/* 0x0689: ctx_redswitch_delay */
 	0x01f2b608,
 	0xf1fd1bf4,
 	0xd00770f7,
 	0x00f800ef,
+/* 0x0698: ctx_86c */
 	0x086ce7f1,
 	0xd006e4b6,
 	0xe7f100ef,
@@ -625,6 +698,7 @@ uint32_t nvc0_grhub_code[] = {
 	0xa86ce7f1,
 	0xf441e3f0,
 	0x00f88d21,
+/* 0x06b8: ctx_load */
 	0x083c87f1,
 	0xbd0684b6,
 	0x0599f094,
@@ -639,6 +713,7 @@ uint32_t nvc0_grhub_code[] = {
 	0x0614b60a,
 	0xd00747f0,
 	0x14d00012,
+/* 0x06f1: ctx_chan_wait_0 */
 	0x4014cf40,
 	0xf41f44f0,
 	0x32d0fa1b,
@@ -688,6 +763,7 @@ uint32_t nvc0_grhub_code[] = {
 	0xbd0684b6,
 	0x0599f094,
 	0xf80089d0,
+/* 0x07b5: ctx_chan */
 	0x3121f500,
 	0xb821f506,
 	0x0ca7f006,
@@ -695,39 +771,48 @@ uint32_t nvc0_grhub_code[] = {
 	0xb60a1017,
 	0x27f00614,
 	0x0012d005,
+/* 0x07d0: ctx_chan_wait */
 	0xfd0012cf,
 	0x1bf40522,
 	0x4921f5fa,
+/* 0x07df: ctx_mmio_exec */
 	0x9800f806,
 	0x27f18103,
 	0x24b60a04,
 	0x0023d006,
+/* 0x07ee: ctx_mmio_loop */
 	0x34c434bd,
 	0x0f1bf4ff,
 	0x030057f1,
 	0xfa0653f0,
 	0x03f80535,
+/* 0x0800: ctx_mmio_pull */
 	0x98c04e98,
 	0x21f4c14f,
 	0x0830b68d,
 	0xf40112b6,
+/* 0x0812: ctx_mmio_done */
 	0x0398df1b,
 	0x0023d016,
 	0xf1800080,
 	0xf0020017,
 	0x01fa0613,
 	0xf803f806,
+/* 0x0829: ctx_xfer */
 	0x0611f400,
+/* 0x082f: ctx_xfer_pre */
 	0xf01102f4,
 	0x21f510f7,
 	0x21f50698,
 	0x11f40631,
+/* 0x083d: ctx_xfer_pre_load */
 	0x02f7f01c,
 	0x065721f5,
 	0x066621f5,
 	0x067821f5,
 	0x21f5f4bd,
 	0x21f50657,
+/* 0x0856: ctx_xfer_exec */
 	0x019806b8,
 	0x1427f116,
 	0x0624b604,
@@ -762,9 +847,11 @@ uint32_t nvc0_grhub_code[] = {
 	0x0a1017f1,
 	0xf00614b6,
 	0x12d00527,
+/* 0x08dd: ctx_xfer_post_save_wait */
 	0x0012cf00,
 	0xf40522fd,
 	0x02f4fa1b,
+/* 0x08e9: ctx_xfer_post */
 	0x02f7f032,
 	0x065721f5,
 	0x21f5f4bd,
@@ -776,7 +863,9 @@ uint32_t nvc0_grhub_code[] = {
 	0x11fd8001,
 	0x070bf405,
 	0x07df21f5,
+/* 0x0914: ctx_xfer_no_post_mmio */
 	0x064921f5,
+/* 0x0918: ctx_xfer_done */
 	0x000000f8,
 	0x00000000,
 	0x00000000,
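The /* 0x0200: chan_data */ annotations above also expose the per-channel header that the ctx_mmio_exec path consumes on a channel's first load: a count of mmio overrides and the vm address of the list holding them. The entries themselves are eight bytes each, since the execution loop (spelled out in hubnve0.fuc below) loads one address word and one data word per entry and hands them to #nv_wr32. A rough C view, with names taken from the fuc labels where they exist and invented where they do not:

#include <stdint.h>

/* Rough sketch, not from the patch: the per-channel mmio override list the
 * HUB microcode's #ctx_mmio_exec walks.  chan_mmio_count == 0 disables the
 * list (the fuc zeroes it after the first run). */
struct chan_mmio_entry {
	uint32_t addr;			/* PGRAPH register to write            */
	uint32_t data;			/* value to write                      */
};

struct chan_data {
	uint32_t chan_mmio_count;	/* number of entries to execute        */
	uint32_t chan_mmio_address;	/* vm address of the chan_mmio_entry[] */
	/* rest of the 256-byte aligned block is unused in this patch */
};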
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
new file mode 100644
index 000000000000..138eeaa28665
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -0,0 +1,780 @@
+/* fuc microcode for nve0 PGRAPH/HUB
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ *    m4 nve0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nve0_grhub.fuc.h
+ */
+
+.section #nve0_grhub_data
+include(`nve0.fuc')
+gpc_count:		.b32 0
+rop_count:		.b32 0
+cmd_queue:		queue_init
+hub_mmio_list_head:	.b32 0
+hub_mmio_list_tail:	.b32 0
+
+ctx_current:		.b32 0
+
+chipsets:
+.b8  0xe4 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
+.b8  0xe7 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
+.b8  0 0 0 0
+
+nve4_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404010, 7)
+mmctx_data(0x4040a8, 9)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 1)
+mmctx_data(0x4041a0, 4)
+mmctx_data(0x404200, 4)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 4)
+mmctx_data(0x40462c, 2)
+mmctx_data(0x404640, 1)
+mmctx_data(0x404654, 1)
+mmctx_data(0x404660, 1)
+mmctx_data(0x404678, 19)
+mmctx_data(0x4046c8, 3)
+mmctx_data(0x404700, 3)
+mmctx_data(0x404718, 10)
+mmctx_data(0x404744, 2)
+mmctx_data(0x404754, 1)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x405b00, 1)
+mmctx_data(0x405b10, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 2)
+mmctx_data(0x4064c0, 12)
+mmctx_data(0x4064fc, 1)
+mmctx_data(0x407040, 1)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408840, 1)
+mmctx_data(0x408900, 3)
+mmctx_data(0x408980, 1)
+nve4_hub_mmio_tail:
+
+.align 256
+chan_data:
+chan_mmio_count:	.b32 0
+chan_mmio_address:	.b32 0
+
+.align 256
+xfer_data: 		.b32 0
+
+.section #nve0_grhub_code
+bra #init
+define(`include_code')
+include(`nve0.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nve0.fuc)
+//
+error:
+	push $r14
+	mov $r14 0x814
+	shl b32 $r14 6
+	iowr I[$r14 + 0x000] $r15	// CC_SCRATCH[5] = error code
+	mov $r14 0xc1c
+	shl b32 $r14 6
+	mov $r15 1
+	iowr I[$r14 + 0x000] $r15	// INTR_UP_SET
+	pop $r14
+	ret
+
+// HUB fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+//   CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//
+// Output:
+//   CC_SCRATCH[0]:
+//	     31:31: set to signal completion
+//   CC_SCRATCH[1]:
+//	      31:0: total PGRAPH context size
+//
+init:
+	clear b32 $r0
+	mov $sp $r0
+	mov $xdbase $r0
+
+	// enable fifo access
+	mov $r1 0x1200
+	mov $r2 2
+	iowr I[$r1 + 0x000] $r2	// FIFO_ENABLE
+
+	// setup i0 handler, and route all interrupts to it
+	mov $r1 #ih
+	mov $iv0 $r1
+	mov $r1 0x400
+	iowr I[$r1 + 0x300] $r0	// INTR_DISPATCH
+
+	// route HUB_CHANNEL_SWITCH to fuc interrupt 8
+	mov $r3 0x404
+	shl b32 $r3 6
+	mov $r2 0x2003		// { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
+	iowr I[$r3 + 0x000] $r2
+
+	// not sure what these are, route them because NVIDIA does, and
+	// the IRQ handler will signal the host if we ever get one.. we
+	// may find out if/why we need to handle these if so..
+	//
+	mov $r2 0x2004
+	iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
+	mov $r2 0x200b
+	iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
+	mov $r2 0x200c
+	iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+
+	// enable all INTR_UP interrupts
+	mov $r2 0xc24
+	shl b32 $r2 6
+	not b32 $r3 $r0
+	iowr I[$r2] $r3
+
+	// enable fifo, ctxsw, 9, 10, 15 interrupts
+	mov $r2 -0x78fc		// 0x8704
+	sethi $r2 0
+	iowr I[$r1 + 0x000] $r2	// INTR_EN_SET
+
+	// fifo level triggered, rest edge
+	sub b32 $r1 0x100
+	mov $r2 4
+	iowr I[$r1] $r2
+
+	// enable interrupts
+	bset $flags ie0
+
+	// fetch enabled GPC/ROP counts
+	mov $r14 -0x69fc	// 0x409604
+	sethi $r14 0x400000
+	call #nv_rd32
+	extr $r1 $r15 16:20
+	st b32 D[$r0 + #rop_count] $r1
+	and $r15 0x1f
+	st b32 D[$r0 + #gpc_count] $r15
+
+	// set BAR_REQMASK to GPC mask
+	mov $r1 1
+	shl b32 $r1 $r15
+	sub b32 $r1 1
+	mov $r2 0x40c
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r1
+	iowr I[$r2 + 0x100] $r1
+
+	// find context data for this chipset
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x000]		// CC_SCRATCH[0]
+	mov $r15 #chipsets - 8
+	init_find_chipset:
+		add b32 $r15 8
+		ld b32 $r3 D[$r15 + 0x00]
+		cmpu b32 $r3 $r2
+		bra e #init_context
+		cmpu b32 $r3 0
+		bra ne #init_find_chipset
+		// unknown chipset
+		ret
+
+	// context size calculation, reserve first 256 bytes for use by fuc
+	init_context:
+	mov $r1 256
+
+	// calculate size of mmio context data
+	ld b16 $r14 D[$r15 + 4]
+	ld b16 $r15 D[$r15 + 6]
+	sethi $r14 0
+	st b32 D[$r0 + #hub_mmio_list_head] $r14
+	st b32 D[$r0 + #hub_mmio_list_tail] $r15
+	call #mmctx_size
+
+	// set mmctx base addresses now so we don't have to do it later,
+	// they don't (currently) ever change
+	mov $r3 0x700
+	shl b32 $r3 6
+	shr b32 $r4 $r1 8
+	iowr I[$r3 + 0x000] $r4		// MMCTX_SAVE_SWBASE
+	iowr I[$r3 + 0x100] $r4		// MMCTX_LOAD_SWBASE
+	add b32 $r3 0x1300
+	add b32 $r1 $r15
+	shr b32 $r15 2
+	iowr I[$r3 + 0x000] $r15	// MMCTX_LOAD_COUNT, wtf for?!?
+
+	// strands, base offset needs to be aligned to 256 bytes
+	shr b32 $r1 8
+	add b32 $r1 1
+	shl b32 $r1 8
+	mov b32 $r15 $r1
+	call #strand_ctx_init
+	add b32 $r1 $r15
+
+	// initialise each GPC in sequence by passing in the offset of its
+	// context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
+	// has previously been uploaded by the host) running.
+	//
+	// the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
+	// when it has completed, and return the size of its context data
+	// in GPCn_CC_SCRATCH[1]
+	//
+	ld b32 $r3 D[$r0 + #gpc_count]
+	mov $r4 0x2000
+	sethi $r4 0x500000
+	init_gpc:
+		// setup, and start GPC ucode running
+		add b32 $r14 $r4 0x804
+		mov b32 $r15 $r1
+		call #nv_wr32			// CC_SCRATCH[1] = ctx offset
+		add b32 $r14 $r4 0x800
+		mov b32 $r15 $r2
+		call #nv_wr32			// CC_SCRATCH[0] = chipset
+		add b32 $r14 $r4 0x10c
+		clear b32 $r15
+		call #nv_wr32
+		add b32 $r14 $r4 0x104
+		call #nv_wr32			// ENTRY
+		add b32 $r14 $r4 0x100
+		mov $r15 2			// CTRL_START_TRIGGER
+		call #nv_wr32			// CTRL
+
+		// wait for it to complete, and adjust context size
+		add b32 $r14 $r4 0x800
+		init_gpc_wait:
+			call #nv_rd32
+			xbit $r15 $r15 31
+			bra e #init_gpc_wait
+		add b32 $r14 $r4 0x804
+		call #nv_rd32
+		add b32 $r1 $r15
+
+		// next!
+		add b32 $r4 0x8000
+		sub b32 $r3 1
+		bra ne #init_gpc
+
+	// save context size, and tell host we're ready
+	mov $r2 0x800
+	shl b32 $r2 6
+	iowr I[$r2 + 0x100] $r1		// CC_SCRATCH[1]  = context size
+	add b32 $r2 0x800
+	clear b32 $r1
+	bset $r1 31
+	iowr I[$r2 + 0x000] $r1		// CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+	// sleep until we have something to do
+	bset $flags $p0
+	sleep $p0
+	mov $r13 #cmd_queue
+	call #queue_get
+	bra $p1 #main
+
+	// context switch, requested by GPU?
+	cmpu b32 $r14 0x4001
+	bra ne #main_not_ctx_switch
+		trace_set(T_AUTO)
+		mov $r1 0xb00
+		shl b32 $r1 6
+		iord $r2 I[$r1 + 0x100]		// CHAN_NEXT
+		iord $r1 I[$r1 + 0x000]		// CHAN_CUR
+
+		xbit $r3 $r1 31
+		bra e #chsw_no_prev
+			xbit $r3 $r2 31
+			bra e #chsw_prev_no_next
+				push $r2
+				mov b32 $r2 $r1
+				trace_set(T_SAVE)
+				bclr $flags $p1
+				bset $flags $p2
+				call #ctx_xfer
+				trace_clr(T_SAVE);
+				pop $r2
+				trace_set(T_LOAD);
+				bset $flags $p1
+				call #ctx_xfer
+				trace_clr(T_LOAD);
+				bra #chsw_done
+			chsw_prev_no_next:
+				push $r2
+				mov b32 $r2 $r1
+				bclr $flags $p1
+				bclr $flags $p2
+				call #ctx_xfer
+				pop $r2
+				mov $r1 0xb00
+				shl b32 $r1 6
+				iowr I[$r1] $r2
+				bra #chsw_done
+		chsw_no_prev:
+			xbit $r3 $r2 31
+			bra e #chsw_done
+				bset $flags $p1
+				bclr $flags $p2
+				call #ctx_xfer
+
+		// ack the context switch request
+		chsw_done:
+		mov $r1 0xb0c
+		shl b32 $r1 6
+		mov $r2 1
+		iowr I[$r1 + 0x000] $r2		// 0x409b0c
+		trace_clr(T_AUTO)
+		bra #main
+
+	// request to set current channel? (*not* a context switch)
+	main_not_ctx_switch:
+	cmpu b32 $r14 0x0001
+	bra ne #main_not_ctx_chan
+		mov b32 $r2 $r15
+		call #ctx_chan
+		bra #main_done
+
+	// request to store current channel context?
+	main_not_ctx_chan:
+	cmpu b32 $r14 0x0002
+	bra ne #main_not_ctx_save
+		trace_set(T_SAVE)
+		bclr $flags $p1
+		bclr $flags $p2
+		call #ctx_xfer
+		trace_clr(T_SAVE)
+		bra #main_done
+
+	main_not_ctx_save:
+		shl b32 $r15 $r14 16
+		or $r15 E_BAD_COMMAND
+		call #error
+		bra #main
+
+	main_done:
+	mov $r1 0x820
+	shl b32 $r1 6
+	clear b32 $r2
+	bset $r2 31
+	iowr I[$r1 + 0x000] $r2		// CC_SCRATCH[0] |= 0x80000000
+	bra #main
+
+// interrupt handler
+ih:
+	push $r8
+	mov $r8 $flags
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r13
+	push $r14
+	push $r15
+
+	// incoming fifo command?
+	iord $r10 I[$r0 + 0x200]	// INTR
+	and $r11 $r10 0x00000004
+	bra e #ih_no_fifo
+		// queue incoming fifo command for later processing
+		mov $r11 0x1900
+		mov $r13 #cmd_queue
+		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
+		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
+		call #queue_put
+		add b32 $r11 0x400
+		mov $r14 1
+		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+
+	// context switch request?
+	ih_no_fifo:
+	and $r11 $r10 0x00000100
+	bra e #ih_no_ctxsw
+		// enqueue a context switch for later processing
+		mov $r13 #cmd_queue
+		mov $r14 0x4001
+		call #queue_put
+
+	// anything we didn't handle, bring it to the host's attention
+	ih_no_ctxsw:
+	mov $r11 0x104
+	not b32 $r11
+	and $r11 $r10 $r11
+	bra e #ih_no_other
+		mov $r10 0xc1c
+		shl b32 $r10 6
+		iowr I[$r10] $r11	// INTR_UP_SET
+
+	// ack, and wake up main()
+	ih_no_other:
+	iowr I[$r0 + 0x100] $r10	// INTR_ACK
+
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	mov $flags $r8
+	pop $r8
+	bclr $flags $p0
+	iret
+
+// Again, not real sure
+//
+// In: $r15 value to set 0x404170 to
+//
+ctx_4170s:
+	mov $r14 0x4170
+	sethi $r14 0x400000
+	or $r15 0x10
+	call #nv_wr32
+	ret
+
+// Waits for a ctx_4170s() call to complete
+//
+ctx_4170w:
+	mov $r14 0x4170
+	sethi $r14 0x400000
+	call #nv_rd32
+	and $r15 0x10
+	bra ne #ctx_4170w
+	ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off?  Anyways, without this,
+// funny things happen.
+//
+ctx_redswitch:
+	mov $r14 0x614
+	shl b32 $r14 6
+	mov $r15 0x270
+	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+	mov $r15 8
+	ctx_redswitch_delay:
+		sub b32 $r15 1
+		bra ne #ctx_redswitch_delay
+	mov $r15 0x770
+	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+	ret
+
+// Not a clue what this is for, except that unless the value is 0x10, the
+// strand context is saved (and presumably restored) incorrectly..
+//
+// In: $r15 value to set to (0x00/0x10 are used)
+//
+ctx_86c:
+	mov $r14 0x86c
+	shl b32 $r14 6
+	iowr I[$r14] $r15	// HUB(0x86c) = val
+	mov $r14 -0x75ec
+	sethi $r14 0x400000
+	call #nv_wr32		// ROP(0xa14) = val
+	mov $r14 -0x5794
+	sethi $r14 0x410000
+	call #nv_wr32		// GPC(0x86c) = val
+	ret
+
+// ctx_load - loads a channel's ctxctl data, and selects its vm
+//
+// In: $r2 channel address
+//
+ctx_load:
+	trace_set(T_CHAN)
+
+	// switch to channel, somewhat magic in parts..
+	mov $r10 12		// DONE_UNK12
+	call #wait_donez
+	mov $r1 0xa24
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r0	// 0x409a24
+	mov $r3 0xb00
+	shl b32 $r3 6
+	iowr I[$r3 + 0x100] $r2	// CHAN_NEXT
+	mov $r1 0xa0c
+	shl b32 $r1 6
+	mov $r4 7
+	iowr I[$r1 + 0x000] $r2 // MEM_CHAN
+	iowr I[$r1 + 0x100] $r4	// MEM_CMD
+	ctx_chan_wait_0:
+		iord $r4 I[$r1 + 0x100]
+		and $r4 0x1f
+		bra ne #ctx_chan_wait_0
+	iowr I[$r3 + 0x000] $r2	// CHAN_CUR
+
+	// load channel header, fetch PGRAPH context pointer
+	mov $xtargets $r0
+	bclr $r2 31
+	shl b32 $r2 4
+	add b32 $r2 2
+
+	trace_set(T_LCHAN)
+	mov $r1 0xa04
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r2		// MEM_BASE
+	mov $r1 0xa20
+	shl b32 $r1 6
+	mov $r2 0x0002
+	sethi $r2 0x80000000
+	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vram
+	mov $r1 0x10			// chan + 0x0210
+	mov $r2 #xfer_data
+	sethi $r2 0x00020000		// 16 bytes
+	xdld $r1 $r2
+	xdwait
+	trace_clr(T_LCHAN)
+
+	// update current context
+	ld b32 $r1 D[$r0 + #xfer_data + 4]
+	shl b32 $r1 24
+	ld b32 $r2 D[$r0 + #xfer_data + 0]
+	shr b32 $r2 8
+	or $r1 $r2
+	st b32 D[$r0 + #ctx_current] $r1
+
+	// set transfer base to start of context, and fetch context header
+	trace_set(T_LCTXH)
+	mov $r2 0xa04
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r1		// MEM_BASE
+	mov $r2 1
+	mov $r1 0xa20
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vm
+	mov $r1 #chan_data
+	sethi $r1 0x00060000		// 256 bytes
+	xdld $r0 $r1
+	xdwait
+	trace_clr(T_LCTXH)
+
+	trace_clr(T_CHAN)
+	ret
+
+// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
+//            the active channel for ctxctl, but not actually transfer
+//            any context data.  intended for use only during initial
+//            context construction.
+//
+// In: $r2 channel address
+//
+ctx_chan:
+	call #ctx_load
+	mov $r10 12			// DONE_UNK12
+	call #wait_donez
+	mov $r1 0xa10
+	shl b32 $r1 6
+	mov $r2 5
+	iowr I[$r1 + 0x000] $r2		// MEM_CMD = 5 (???)
+	ctx_chan_wait:
+		iord $r2 I[$r1 + 0x000]
+		or $r2 $r2
+		bra ne #ctx_chan_wait
+	ret
+
+// Execute per-context state overrides list
+//
+// Only executed on the first load of a channel.  Might want to look into
+// removing this and having the host directly modify the channel's context
+// to change this state...  The nouveau DRM already builds this list as
+// it's definitely needed for NVIDIA's, so we may as well use it for now
+//
+// Input: $r1 mmio list length
+//
+ctx_mmio_exec:
+	// set transfer base to be the mmio list
+	ld b32 $r3 D[$r0 + #chan_mmio_address]
+	mov $r2 0xa04
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+
+	clear b32 $r3
+	ctx_mmio_loop:
+		// fetch next 256 bytes of mmio list if necessary
+		and $r4 $r3 0xff
+		bra ne #ctx_mmio_pull
+			mov $r5 #xfer_data
+			sethi $r5 0x00060000	// 256 bytes
+			xdld $r3 $r5
+			xdwait
+
+		// execute a single list entry
+		ctx_mmio_pull:
+		ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+		ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+		call #nv_wr32
+
+		// next!
+		add b32 $r3 8
+		sub b32 $r1 1
+		bra ne #ctx_mmio_loop
+
+	// set transfer base back to the current context
+	ctx_mmio_done:
+	ld b32 $r3 D[$r0 + #ctx_current]
+	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+
+	// disable the mmio list now, we don't need/want to execute it again
+	st b32 D[$r0 + #chan_mmio_count] $r0
+	mov $r1 #chan_data
+	sethi $r1 0x00060000		// 256 bytes
+	xdst $r0 $r1
+	xdwait
+	ret
+
+// Transfer HUB context data between GPU and storage area
+//
+// In: $r2 channel address
+//     $p1 clear on save, set on load
+//     $p2 set if opposite direction done/will be done, so:
+//		on save it means: "a load will follow this save"
+//		on load it means: "a save preceded this load"
+//
+ctx_xfer:
+	bra not $p1 #ctx_xfer_pre
+	bra $p2 #ctx_xfer_pre_load
+	ctx_xfer_pre:
+		mov $r15 0x10
+		call #ctx_86c
+		bra not $p1 #ctx_xfer_exec
+
+	ctx_xfer_pre_load:
+		mov $r15 2
+		call #ctx_4170s
+		call #ctx_4170w
+		call #ctx_redswitch
+		clear b32 $r15
+		call #ctx_4170s
+		call #ctx_load
+
+	// fetch context pointer, and initiate xfer on all GPCs
+	ctx_xfer_exec:
+	ld b32 $r1 D[$r0 + #ctx_current]
+	mov $r2 0x414
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r0	// BAR_STATUS = reset
+	mov $r14 -0x5b00
+	sethi $r14 0x410000
+	mov b32 $r15 $r1
+	call #nv_wr32		// GPC_BCAST_WRCMD_DATA = ctx pointer
+	add b32 $r14 4
+	xbit $r15 $flags $p1
+	xbit $r2 $flags $p2
+	shl b32 $r2 1
+	or $r15 $r2
+	call #nv_wr32		// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+
+	// strands
+	mov $r1 0x4afc
+	sethi $r1 0x20000
+	mov $r2 0xc
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
+	call #strand_wait
+	mov $r2 0x47fc
+	sethi $r2 0x20000
+	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
+	xbit $r2 $flags $p1
+	add b32 $r2 3
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+	// mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 6		// first, last
+	mov $r11 0		// base = 0
+	ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+	ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
+	mov $r14 0		// not multi
+	call #mmctx_xfer
+
+	// wait for GPCs to all complete
+	mov $r10 8		// DONE_BAR
+	call #wait_doneo
+
+	// wait for strand xfer to complete
+	call #strand_wait
+
+	// post-op
+	bra $p1 #ctx_xfer_post
+		mov $r10 12		// DONE_UNK12
+		call #wait_donez
+		mov $r1 0xa10
+		shl b32 $r1 6
+		mov $r2 5
+		iowr I[$r1] $r2		// MEM_CMD
+		ctx_xfer_post_save_wait:
+			iord $r2 I[$r1]
+			or $r2 $r2
+			bra ne #ctx_xfer_post_save_wait
+
+	bra $p2 #ctx_xfer_done
+	ctx_xfer_post:
+		mov $r15 2
+		call #ctx_4170s
+		clear b32 $r15
+		call #ctx_86c
+		call #strand_post
+		call #ctx_4170w
+		clear b32 $r15
+		call #ctx_4170s
+
+		bra not $p1 #ctx_xfer_no_post_mmio
+		ld b32 $r1 D[$r0 + #chan_mmio_count]
+		or $r1 $r1
+		bra e #ctx_xfer_no_post_mmio
+			call #ctx_mmio_exec
+
+		ctx_xfer_no_post_mmio:
+
+	ctx_xfer_done:
+	ret
+
+.align 256
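The init comment block above describes the CC_SCRATCH handshake in words, and #init_gpc implements the HUB side of it for every GPC: write the running context offset and the chipset id into GPCn_CC_SCRATCH[1]/[0], kick the GPC fuc, poll CC_SCRATCH[0] bit 31, then read the GPC's context size back from CC_SCRATCH[1]. The same sequence redone as a C sketch purely for readability; the mmio_rd32/mmio_wr32 helpers are hypothetical stand-ins, and the 0x502000 base, 0x8000 stride and register offsets are the ones the fuc code computes:

#include <stdint.h>

/* Hypothetical MMIO accessors, declared only so the sketch is self-contained. */
extern uint32_t mmio_rd32(uint32_t addr);
extern void     mmio_wr32(uint32_t addr, uint32_t data);

/* Illustration of the per-GPC handshake performed by #init_gpc above; this is
 * not code from the patch. */
static uint32_t start_gpc_ctxctl(int gpc, uint32_t chipset, uint32_t ctx_offset)
{
	uint32_t base = 0x502000 + gpc * 0x8000;	/* GPCn ctxctl registers */

	mmio_wr32(base + 0x804, ctx_offset);	/* GPCn_CC_SCRATCH[1] = ctx offset */
	mmio_wr32(base + 0x800, chipset);	/* GPCn_CC_SCRATCH[0] = chipset    */
	mmio_wr32(base + 0x10c, 0);
	mmio_wr32(base + 0x104, 0);		/* ENTRY                           */
	mmio_wr32(base + 0x100, 2);		/* CTRL = CTRL_START_TRIGGER       */

	while (!(mmio_rd32(base + 0x800) & 0x80000000))
		;				/* GPC fuc sets bit 31 when done   */

	return mmio_rd32(base + 0x804);		/* GPC context size                */
}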
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
new file mode 100644
index 000000000000..decf0c60ca3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -0,0 +1,857 @@
+uint32_t nve0_grhub_data[] = {
+/* 0x0000: gpc_count */
+	0x00000000,
+/* 0x0004: rop_count */
+	0x00000000,
+/* 0x0008: cmd_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0050: hub_mmio_list_head */
+	0x00000000,
+/* 0x0054: hub_mmio_list_tail */
+	0x00000000,
+/* 0x0058: ctx_current */
+	0x00000000,
+/* 0x005c: chipsets */
+	0x000000e4,
+	0x013c0070,
+	0x000000e7,
+	0x013c0070,
+	0x00000000,
+/* 0x0070: nve4_hub_mmio_head */
+	0x0417e91c,
+	0x04400204,
+	0x18404010,
+	0x204040a8,
+	0x184040d0,
+	0x004040f8,
+	0x08404130,
+	0x08404150,
+	0x00404164,
+	0x0c4041a0,
+	0x0c404200,
+	0x34404404,
+	0x0c404460,
+	0x00404480,
+	0x00404498,
+	0x0c404604,
+	0x0c404618,
+	0x0440462c,
+	0x00404640,
+	0x00404654,
+	0x00404660,
+	0x48404678,
+	0x084046c8,
+	0x08404700,
+	0x24404718,
+	0x04404744,
+	0x00404754,
+	0x00405800,
+	0x08405830,
+	0x00405854,
+	0x0c405870,
+	0x04405a00,
+	0x00405a18,
+	0x00405b00,
+	0x00405b10,
+	0x00406020,
+	0x0c406028,
+	0x044064a8,
+	0x044064b4,
+	0x2c4064c0,
+	0x004064fc,
+	0x00407040,
+	0x00407804,
+	0x1440780c,
+	0x004078bc,
+	0x18408000,
+	0x00408064,
+	0x08408800,
+	0x00408840,
+	0x08408900,
+	0x00408980,
+/* 0x013c: nve4_hub_mmio_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0200: chan_data */
+/* 0x0200: chan_mmio_count */
+	0x00000000,
+/* 0x0204: chan_mmio_address */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0300: xfer_data */
+	0x00000000,
+};
+
+uint32_t nve0_grhub_code[] = {
+	0x03090ef5,
+/* 0x0004: queue_put */
+	0x9800d898,
+	0x86f001d9,
+	0x0489b808,
+	0xf00c1bf4,
+	0x21f502f7,
+	0x00f802ec,
+/* 0x001c: queue_put_next */
+	0xb60798c4,
+	0x8dbb0384,
+	0x0880b600,
+	0x80008e80,
+	0x90b6018f,
+	0x0f94f001,
+	0xf801d980,
+/* 0x0039: queue_get */
+	0x0131f400,
+	0x9800d898,
+	0x89b801d9,
+	0x210bf404,
+	0xb60789c4,
+	0x9dbb0394,
+	0x0890b600,
+	0x98009e98,
+	0x80b6019f,
+	0x0f84f001,
+	0xf400d880,
+/* 0x0066: queue_get_done */
+	0x00f80132,
+/* 0x0068: nv_rd32 */
+	0x0728b7f1,
+	0xb906b4b6,
+	0xc9f002ec,
+	0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
+	0xc800bccf,
+	0x1bf41fcc,
+	0x06a7f0fa,
+	0x010321f5,
+	0xf840bfcf,
+/* 0x008d: nv_wr32 */
+	0x28b7f100,
+	0x06b4b607,
+	0xb980bfd0,
+	0xc9f002ec,
+	0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
+	0xcf00bcd0,
+	0xccc800bc,
+	0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
+	0x87f100f8,
+	0x84b60430,
+	0x1ff9f006,
+	0xf8008fd0,
+/* 0x00bd: watchdog_clear */
+	0x3087f100,
+	0x0684b604,
+	0xf80080d0,
+/* 0x00c9: wait_donez */
+	0x3c87f100,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d000,
+	0x081887f1,
+	0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
+	0x87f1008a,
+	0x84b60400,
+	0x0088cf06,
+	0xf4888aff,
+	0x87f1f31b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00099,
+/* 0x0103: wait_doneo */
+	0xf100f800,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00099f0,
+	0x87f10089,
+	0x84b60818,
+	0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
+	0x040087f1,
+	0xcf0684b6,
+	0x8aff0088,
+	0xf30bf488,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0099f094,
+	0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
+	0x9894bd00,
+	0x85b600e8,
+	0x0180b61a,
+	0xbb0284b6,
+	0xe0b60098,
+	0x04efb804,
+	0xb9eb1bf4,
+	0x00f8029f,
+/* 0x015c: mmctx_xfer */
+	0x083c87f1,
+	0xbd0684b6,
+	0x0199f094,
+	0xf10089d0,
+	0xb6071087,
+	0x94bd0684,
+	0xf405bbfd,
+	0x8bd0090b,
+	0x0099f000,
+/* 0x0180: mmctx_base_disabled */
+	0xf405eefd,
+	0x8ed00c0b,
+	0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
+	0xb70199f0,
+	0xc8010080,
+	0xb4b600ab,
+	0x0cb9f010,
+	0xb601aec8,
+	0xbefd11e4,
+	0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
+	0xf0008ecf,
+	0x0bf41fe4,
+	0x00ce98fa,
+	0xd005e9fd,
+	0xc0b6c08e,
+	0x04cdb804,
+	0xc8e81bf4,
+	0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
+	0x008bcf18,
+	0xb01fb4f0,
+	0x1bf410b4,
+	0x02a7f0f7,
+	0xf4c921f4,
+/* 0x01de: mmctx_stop */
+	0xabc81b0e,
+	0x10b4b600,
+	0xf00cb9f0,
+	0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
+	0x008bcf00,
+	0xf412bbc8,
+/* 0x01f6: mmctx_done */
+	0x87f1fa1b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00199,
+/* 0x0207: strand_wait */
+	0xf900f800,
+	0x02a7f0a0,
+	0xfcc921f4,
+/* 0x0213: strand_pre */
+	0xf100f8a0,
+	0xf04afc87,
+	0x97f00283,
+	0x0089d00c,
+	0x020721f5,
+/* 0x0226: strand_post */
+	0x87f100f8,
+	0x83f04afc,
+	0x0d97f002,
+	0xf50089d0,
+	0xf8020721,
+/* 0x0239: strand_set */
+	0xfca7f100,
+	0x02a3f04f,
+	0x0500aba2,
+	0xd00fc7f0,
+	0xc7f000ac,
+	0x00bcd00b,
+	0x020721f5,
+	0xf000aed0,
+	0xbcd00ac7,
+	0x0721f500,
+/* 0x0263: strand_ctx_init */
+	0xf100f802,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x21f50089,
+	0xe7f00213,
+	0x3921f503,
+	0xfca7f102,
+	0x02a3f046,
+	0x0400aba0,
+	0xf040a0d0,
+	0xbcd001c7,
+	0x0721f500,
+	0x010c9202,
+	0xf000acd0,
+	0xbcd002c7,
+	0x0721f500,
+	0x2621f502,
+	0x8087f102,
+	0x0684b608,
+	0xb70089cf,
+	0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
+	0x8ed008fe,
+	0x408ed000,
+	0xb6808acf,
+	0xa0b606a5,
+	0x00eabb01,
+	0xb60480b6,
+	0x1bf40192,
+	0x08e4b6e8,
+	0xf1f2efbc,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x00f80089,
+/* 0x02ec: error */
+	0xe7f1e0f9,
+	0xe4b60814,
+	0x00efd006,
+	0x0c1ce7f1,
+	0xf006e4b6,
+	0xefd001f7,
+	0xf8e0fc00,
+/* 0x0309: init */
+	0xfe04bd00,
+	0x07fe0004,
+	0x0017f100,
+	0x0227f012,
+	0xf10012d0,
+	0xfe05b917,
+	0x17f10010,
+	0x10d00400,
+	0x0437f1c0,
+	0x0634b604,
+	0x200327f1,
+	0xf10032d0,
+	0xd0200427,
+	0x27f10132,
+	0x32d0200b,
+	0x0c27f102,
+	0x0732d020,
+	0x0c2427f1,
+	0xb90624b6,
+	0x23d00003,
+	0x0427f100,
+	0x0023f087,
+	0xb70012d0,
+	0xf0010012,
+	0x12d00427,
+	0x1031f400,
+	0x9604e7f1,
+	0xf440e3f0,
+	0xf1c76821,
+	0x01018090,
+	0x801ff4f0,
+	0x17f0000f,
+	0x041fbb01,
+	0xf10112b6,
+	0xb6040c27,
+	0x21d00624,
+	0x4021d000,
+	0x080027f1,
+	0xcf0624b6,
+	0xf7f00022,
+/* 0x03a9: init_find_chipset */
+	0x08f0b654,
+	0xb800f398,
+	0x0bf40432,
+	0x0034b00b,
+	0xf8f11bf4,
+/* 0x03bd: init_context */
+	0x0017f100,
+	0x02fe5801,
+	0xf003ff58,
+	0x0e8000e3,
+	0x150f8014,
+	0x013d21f5,
+	0x070037f1,
+	0x950634b6,
+	0x34d00814,
+	0x4034d000,
+	0x130030b7,
+	0xb6001fbb,
+	0x3fd002f5,
+	0x0815b600,
+	0xb60110b6,
+	0x1fb90814,
+	0x6321f502,
+	0x001fbb02,
+	0xf1000398,
+	0xf0200047,
+/* 0x040e: init_gpc */
+	0x4ea05043,
+	0x1fb90804,
+	0x8d21f402,
+	0x08004ea0,
+	0xf4022fb9,
+	0x4ea08d21,
+	0xf4bd010c,
+	0xa08d21f4,
+	0xf401044e,
+	0x4ea08d21,
+	0xf7f00100,
+	0x8d21f402,
+	0x08004ea0,
+/* 0x0440: init_gpc_wait */
+	0xc86821f4,
+	0x0bf41fff,
+	0x044ea0fa,
+	0x6821f408,
+	0xb7001fbb,
+	0xb6800040,
+	0x1bf40132,
+	0x0027f1b4,
+	0x0624b608,
+	0xb74021d0,
+	0xbd080020,
+	0x1f19f014,
+/* 0x0473: main */
+	0xf40021d0,
+	0x28f40031,
+	0x08d7f000,
+	0xf43921f4,
+	0xe4b1f401,
+	0x1bf54001,
+	0x87f100d1,
+	0x84b6083c,
+	0xf094bd06,
+	0x89d00499,
+	0x0017f100,
+	0x0614b60b,
+	0xcf4012cf,
+	0x13c80011,
+	0x7e0bf41f,
+	0xf41f23c8,
+	0x20f95a0b,
+	0xf10212b9,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00799f0,
+	0x32f40089,
+	0x0231f401,
+	0x07fb21f5,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0799f094,
+	0xfc0089d0,
+	0x3c87f120,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d006,
+	0xf50131f4,
+	0xf107fb21,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00699f0,
+	0x0ef40089,
+/* 0x0509: chsw_prev_no_next */
+	0xb920f931,
+	0x32f40212,
+	0x0232f401,
+	0x07fb21f5,
+	0x17f120fc,
+	0x14b60b00,
+	0x0012d006,
+/* 0x0527: chsw_no_prev */
+	0xc8130ef4,
+	0x0bf41f23,
+	0x0131f40d,
+	0xf50232f4,
+/* 0x0537: chsw_done */
+	0xf107fb21,
+	0xb60b0c17,
+	0x27f00614,
+	0x0012d001,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0499f094,
+	0xf50089d0,
+/* 0x0557: main_not_ctx_switch */
+	0xb0ff200e,
+	0x1bf401e4,
+	0x02f2b90d,
+	0x078f21f5,
+/* 0x0567: main_not_ctx_chan */
+	0xb0420ef4,
+	0x1bf402e4,
+	0x3c87f12e,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d007,
+	0xf40132f4,
+	0x21f50232,
+	0x87f107fb,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00799,
+	0x110ef400,
+/* 0x0598: main_not_ctx_save */
+	0xf010ef94,
+	0x21f501f5,
+	0x0ef502ec,
+/* 0x05a6: main_done */
+	0x17f1fed1,
+	0x14b60820,
+	0xf024bd06,
+	0x12d01f29,
+	0xbe0ef500,
+/* 0x05b9: ih */
+	0xfe80f9fe,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0xc4800acf,
+	0x0bf404ab,
+	0x00b7f11d,
+	0x08d7f019,
+	0xcf40becf,
+	0x21f400bf,
+	0x00b0b704,
+	0x01e7f004,
+/* 0x05ef: ih_no_fifo */
+	0xe400bed0,
+	0xf40100ab,
+	0xd7f00d0b,
+	0x01e7f108,
+	0x0421f440,
+/* 0x0600: ih_no_ctxsw */
+	0x0104b7f1,
+	0xabffb0bd,
+	0x0d0bf4b4,
+	0x0c1ca7f1,
+	0xd006a4b6,
+/* 0x0616: ih_no_other */
+	0x0ad000ab,
+	0xfcf0fc40,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0xf80032f4,
+/* 0x0631: ctx_4170s */
+	0x70e7f101,
+	0x40e3f041,
+	0xf410f5f0,
+	0x00f88d21,
+/* 0x0640: ctx_4170w */
+	0x4170e7f1,
+	0xf440e3f0,
+	0xf4f06821,
+	0xf31bf410,
+/* 0x0652: ctx_redswitch */
+	0xe7f100f8,
+	0xe4b60614,
+	0x70f7f106,
+	0x00efd002,
+/* 0x0663: ctx_redswitch_delay */
+	0xb608f7f0,
+	0x1bf401f2,
+	0x70f7f1fd,
+	0x00efd007,
+/* 0x0672: ctx_86c */
+	0xe7f100f8,
+	0xe4b6086c,
+	0x00efd006,
+	0x8a14e7f1,
+	0xf440e3f0,
+	0xe7f18d21,
+	0xe3f0a86c,
+	0x8d21f441,
+/* 0x0692: ctx_load */
+	0x87f100f8,
+	0x84b6083c,
+	0xf094bd06,
+	0x89d00599,
+	0x0ca7f000,
+	0xf1c921f4,
+	0xb60a2417,
+	0x10d00614,
+	0x0037f100,
+	0x0634b60b,
+	0xf14032d0,
+	0xb60a0c17,
+	0x47f00614,
+	0x0012d007,
+/* 0x06cb: ctx_chan_wait_0 */
+	0xcf4014d0,
+	0x44f04014,
+	0xfa1bf41f,
+	0xfe0032d0,
+	0x2af0000b,
+	0x0424b61f,
+	0xf10220b6,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00899f0,
+	0x17f10089,
+	0x14b60a04,
+	0x0012d006,
+	0x0a2017f1,
+	0xf00614b6,
+	0x23f10227,
+	0x12d08000,
+	0x1017f000,
+	0x030027f1,
+	0xfa0223f0,
+	0x03f80512,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0899f094,
+	0x980089d0,
+	0x14b6c101,
+	0xc0029818,
+	0xfd0825b6,
+	0x01800512,
+	0x3c87f116,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d009,
+	0x0a0427f1,
+	0xd00624b6,
+	0x27f00021,
+	0x2017f101,
+	0x0614b60a,
+	0xf10012d0,
+	0xf0020017,
+	0x01fa0613,
+	0xf103f805,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00999f0,
+	0x87f10089,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00599,
+/* 0x078f: ctx_chan */
+	0xf500f800,
+	0xf0069221,
+	0x21f40ca7,
+	0x1017f1c9,
+	0x0614b60a,
+	0xd00527f0,
+/* 0x07a6: ctx_chan_wait */
+	0x12cf0012,
+	0x0522fd00,
+	0xf8fa1bf4,
+/* 0x07b1: ctx_mmio_exec */
+	0x81039800,
+	0x0a0427f1,
+	0xd00624b6,
+	0x34bd0023,
+/* 0x07c0: ctx_mmio_loop */
+	0xf4ff34c4,
+	0x57f10f1b,
+	0x53f00300,
+	0x0535fa06,
+/* 0x07d2: ctx_mmio_pull */
+	0x4e9803f8,
+	0xc14f98c0,
+	0xb68d21f4,
+	0x12b60830,
+	0xdf1bf401,
+/* 0x07e4: ctx_mmio_done */
+	0xd0160398,
+	0x00800023,
+	0x0017f180,
+	0x0613f002,
+	0xf80601fa,
+/* 0x07fb: ctx_xfer */
+	0xf400f803,
+	0x02f40611,
+/* 0x0801: ctx_xfer_pre */
+	0x10f7f00d,
+	0x067221f5,
+/* 0x080b: ctx_xfer_pre_load */
+	0xf01c11f4,
+	0x21f502f7,
+	0x21f50631,
+	0x21f50640,
+	0xf4bd0652,
+	0x063121f5,
+	0x069221f5,
+/* 0x0824: ctx_xfer_exec */
+	0xf1160198,
+	0xb6041427,
+	0x20d00624,
+	0x00e7f100,
+	0x41e3f0a5,
+	0xf4021fb9,
+	0xe0b68d21,
+	0x01fcf004,
+	0xb6022cf0,
+	0xf2fd0124,
+	0x8d21f405,
+	0x4afc17f1,
+	0xf00213f0,
+	0x12d00c27,
+	0x0721f500,
+	0xfc27f102,
+	0x0223f047,
+	0xf00020d0,
+	0x20b6012c,
+	0x0012d003,
+	0xf001acf0,
+	0xb7f006a5,
+	0x140c9800,
+	0xf0150d98,
+	0x21f500e7,
+	0xa7f0015c,
+	0x0321f508,
+	0x0721f501,
+	0x2201f402,
+	0xf40ca7f0,
+	0x17f1c921,
+	0x14b60a10,
+	0x0527f006,
+/* 0x08ab: ctx_xfer_post_save_wait */
+	0xcf0012d0,
+	0x22fd0012,
+	0xfa1bf405,
+/* 0x08b7: ctx_xfer_post */
+	0xf02e02f4,
+	0x21f502f7,
+	0xf4bd0631,
+	0x067221f5,
+	0x022621f5,
+	0x064021f5,
+	0x21f5f4bd,
+	0x11f40631,
+	0x80019810,
+	0xf40511fd,
+	0x21f5070b,
+/* 0x08e2: ctx_xfer_no_post_mmio */
+/* 0x08e2: ctx_xfer_done */
+	0x00f807b1,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
index e6b228844a32..e6b228844a32 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
new file mode 100644
index 000000000000..f16a5d53319d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
@@ -0,0 +1,400 @@
+/* fuc microcode util functions for nve0 PGRAPH
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
+define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
+
+ifdef(`include_code', `
+// Error codes
+define(`E_BAD_COMMAND', 0x01)
+define(`E_CMD_OVERFLOW', 0x02)
+
+// Util macros to help with debugging ucode hangs etc
+define(`T_WAIT', 0)
+define(`T_MMCTX', 1)
+define(`T_STRWAIT', 2)
+define(`T_STRINIT', 3)
+define(`T_AUTO', 4)
+define(`T_CHAN', 5)
+define(`T_LOAD', 6)
+define(`T_SAVE', 7)
+define(`T_LCHAN', 8)
+define(`T_LCTXH', 9)
+
+define(`trace_set', `
+	mov $r8 0x83c
+	shl b32 $r8 6
+	clear b32 $r9
+	bset $r9 $1
+	iowr I[$r8 + 0x000] $r9		// CC_SCRATCH[7]
+')
+
+define(`trace_clr', `
+	mov $r8 0x85c
+	shl b32 $r8 6
+	clear b32 $r9
+	bset $r9 $1
+	iowr I[$r8 + 0x000] $r9		// CC_SCRATCH[7]
+')
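
The two macros above set and clear one bit per T_* phase in CC_SCRATCH[7], written through what appear to be separate set (0x83c) and clear (0x85c) ports, so a hung ucode can be diagnosed from the host by reading which phase bits are still lit. A minimal C model of the net effect, purely for illustration (the variable and function names are assumptions of this sketch, not driver code):

#include <stdint.h>

/* illustrative model only; stands in for CC_SCRATCH[7], which the real
 * hardware updates through separate set/clear ports rather than RMW */
static volatile uint32_t cc_scratch7;

static void model_trace_set(unsigned int phase) { cc_scratch7 |=  1u << phase; }
static void model_trace_clr(unsigned int phase) { cc_scratch7 &= ~(1u << phase); }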
+
+// queue_put - add request to queue
+//
+// In : $r13 queue pointer
+//	$r14 command
+//	$r15 data
+//
+queue_put:
+	// make sure we have space..
+	ld b32 $r8 D[$r13 + 0x0]	// GET
+	ld b32 $r9 D[$r13 + 0x4]	// PUT
+	xor $r8 8
+	cmpu b32 $r8 $r9
+	bra ne #queue_put_next
+		mov $r15 E_CMD_OVERFLOW
+		call #error
+		ret
+
+	// store cmd/data on queue
+	queue_put_next:
+	and $r8 $r9 7
+	shl b32 $r8 3
+	add b32 $r8 $r13
+	add b32 $r8 8
+	st b32 D[$r8 + 0x0] $r14
+	st b32 D[$r8 + 0x4] $r15
+
+	// update PUT
+	add b32 $r9 1
+	and $r9 0xf
+	st b32 D[$r13 + 0x4] $r9
+	ret
+
+// queue_get - fetch request from queue
+//
+// In : $r13 queue pointer
+//
+// Out:	$p1  clear on success (data available)
+//	$r14 command
+//	$r15 data
+//
+queue_get:
+	bset $flags $p1
+	ld b32 $r8 D[$r13 + 0x0]	// GET
+	ld b32 $r9 D[$r13 + 0x4]	// PUT
+	cmpu b32 $r8 $r9
+	bra e #queue_get_done
+		// fetch first cmd/data pair
+		and $r9 $r8 7
+		shl b32 $r9 3
+		add b32 $r9 $r13
+		add b32 $r9 8
+		ld b32 $r14 D[$r9 + 0x0]
+		ld b32 $r15 D[$r9 + 0x4]
+
+		// update GET
+		add b32 $r8 1
+		and $r8 0xf
+		st b32 D[$r13 + 0x0] $r8
+		bclr $flags $p1
+queue_get_done:
+	ret
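
The GET/PUT handling above is the usual power-of-two ring trick: the pointers count 0..15 while only their low three bits index the eight slots, so GET == PUT means empty and GET ^ 8 == PUT means full. A minimal C model of the same queue, for illustration only (the struct and function names are invented for this sketch and are not part of the driver):

#include <stdint.h>
#include <stdbool.h>

/* illustrative model only */
struct fuc_queue {
	uint32_t get;		/* D[queue + 0x0] */
	uint32_t put;		/* D[queue + 0x4] */
	uint32_t slot[8][2];	/* eight command/data pairs */
};

/* mirrors queue_put: refuse the request when PUT is 8 ahead of GET */
static bool model_queue_put(struct fuc_queue *q, uint32_t cmd, uint32_t data)
{
	if ((q->get ^ 8) == q->put)
		return false;			/* E_CMD_OVERFLOW in the ucode */
	q->slot[q->put & 7][0] = cmd;
	q->slot[q->put & 7][1] = data;
	q->put = (q->put + 1) & 0xf;		/* pointers wrap at 16 */
	return true;
}

/* mirrors queue_get: the queue is empty when GET == PUT */
static bool model_queue_get(struct fuc_queue *q, uint32_t *cmd, uint32_t *data)
{
	if (q->get == q->put)
		return false;
	*cmd  = q->slot[q->get & 7][0];
	*data = q->slot[q->get & 7][1];
	q->get = (q->get + 1) & 0xf;
	return true;
}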
+
+// nv_rd32 - read 32-bit value from nv register
+//
+// In : $r14 register
+// Out: $r15 value
+//
+nv_rd32:
+	mov $r11 0x728
+	shl b32 $r11 6
+	mov b32 $r12 $r14
+	bset $r12 31			// MMIO_CTRL_PENDING
+	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
+	nv_rd32_wait:
+		iord $r12 I[$r11 + 0x000]
+		xbit $r12 $r12 31
+		bra ne #nv_rd32_wait
+	mov $r10 6			// DONE_MMIO_RD
+	call #wait_doneo
+	iord $r15 I[$r11 + 0x100]	// MMIO_RDVAL
+	ret
+
+// nv_wr32 - write 32-bit value to nv register
+//
+// In : $r14 register
+//      $r15 value
+//
+nv_wr32:
+	mov $r11 0x728
+	shl b32 $r11 6
+	iowr I[$r11 + 0x200] $r15	// MMIO_WRVAL
+	mov b32 $r12 $r14
+	bset $r12 31			// MMIO_CTRL_PENDING
+	bset $r12 30			// MMIO_CTRL_WRITE
+	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
+	nv_wr32_wait:
+		iord $r12 I[$r11 + 0x000]
+		xbit $r12 $r12 31
+		bra ne #nv_wr32_wait
+	ret
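
Both routines above drive the same MMIO_CTRL handshake: write the target register with the PENDING bit (plus WRITE for stores), then poll until the falcon clears PENDING; reads additionally wait on DONE bit 6 before fetching MMIO_RDVAL. A hedged C model of the handshake follows; fuc_iord()/fuc_iowr() are assumed accessors for the falcon I/O space and are not real driver functions:

#include <stdint.h>

#define MMIO_CTRL		0x000
#define MMIO_RDVAL		0x100
#define MMIO_WRVAL		0x200
#define MMIO_CTRL_PENDING	(1u << 31)
#define MMIO_CTRL_WRITE		(1u << 30)

extern uint32_t fuc_iord(uint32_t addr);		/* assumed accessor */
extern void fuc_iowr(uint32_t addr, uint32_t val);	/* assumed accessor */

static uint32_t model_nv_rd32(uint32_t base, uint32_t reg)
{
	fuc_iowr(base + MMIO_CTRL, reg | MMIO_CTRL_PENDING);
	while (fuc_iord(base + MMIO_CTRL) & MMIO_CTRL_PENDING)
		;	/* request still in flight */
	/* the ucode also waits for DONE bit 6 (DONE_MMIO_RD) here */
	return fuc_iord(base + MMIO_RDVAL);
}

static void model_nv_wr32(uint32_t base, uint32_t reg, uint32_t val)
{
	fuc_iowr(base + MMIO_WRVAL, val);
	fuc_iowr(base + MMIO_CTRL, reg | MMIO_CTRL_PENDING | MMIO_CTRL_WRITE);
	while (fuc_iord(base + MMIO_CTRL) & MMIO_CTRL_PENDING)
		;
}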
+
+// (re)set watchdog timer
+//
+// In : $r15 timeout
+//
+watchdog_reset:
+	mov $r8 0x430
+	shl b32 $r8 6
+	bset $r15 31
+	iowr I[$r8 + 0x000] $r15
+	ret
+
+// clear watchdog timer
+watchdog_clear:
+	mov $r8 0x430
+	shl b32 $r8 6
+	iowr I[$r8 + 0x000] $r0
+	ret
+
+// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
+//
+// In : $r10 bit to wait on
+//
+define(`wait_done', `
+$1:
+	trace_set(T_WAIT);
+	mov $r8 0x818
+	shl b32 $r8 6
+	iowr I[$r8 + 0x000] $r10	// CC_SCRATCH[6] = wait bit
+	wait_done_$1:
+		mov $r8 0x400
+		shl b32 $r8 6
+		iord $r8 I[$r8 + 0x000]	// DONE
+		xbit $r8 $r8 $r10
+		bra $2 #wait_done_$1
+	trace_clr(T_WAIT)
+	ret
+')
+wait_done(wait_donez, ne)
+wait_done(wait_doneo, e)
+
+// mmctx_size - determine size of a mmio list transfer
+//
+// In : $r14 mmio list head
+//      $r15 mmio list tail
+// Out: $r15 transfer size (in bytes)
+//
+mmctx_size:
+	clear b32 $r9
+	nv_mmctx_size_loop:
+		ld b32 $r8 D[$r14]
+		shr b32 $r8 26
+		add b32 $r8 1
+		shl b32 $r8 2
+		add b32 $r9 $r8
+		add b32 $r14 4
+		cmpu b32 $r14 $r15
+		bra ne #nv_mmctx_size_loop
+	mov b32 $r15 $r9
+	ret
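
Each list entry is laid out by the mmctx_data macro near the top of this file: the register count minus one sits in bits 31:26 and the base address in the low bits, so the loop above adds ((entry >> 26) + 1) * 4 bytes per entry. An equivalent C sketch (the function name is invented for illustration):

#include <stdint.h>

static uint32_t model_mmctx_size(const uint32_t *head, const uint32_t *tail)
{
	uint32_t bytes = 0;

	while (head != tail) {
		bytes += ((*head >> 26) + 1) * 4;	/* 4 bytes per register */
		head++;
	}
	return bytes;
}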
+
+// mmctx_xfer - execute a list of mmio transfers
+//
+// In : $r10 flags
+//		bit 0: direction (0 = save, 1 = load)
+//		bit 1: set if first transfer
+//		bit 2: set if last transfer
+//	$r11 base
+//	$r12 mmio list head
+//	$r13 mmio list tail
+//	$r14 multi_stride
+//	$r15 multi_mask
+//
+mmctx_xfer:
+	trace_set(T_MMCTX)
+	mov $r8 0x710
+	shl b32 $r8 6
+	clear b32 $r9
+	or $r11 $r11
+	bra e #mmctx_base_disabled
+		iowr I[$r8 + 0x000] $r11	// MMCTX_BASE
+		bset $r9 0			// BASE_EN
+	mmctx_base_disabled:
+	or $r14 $r14
+	bra e #mmctx_multi_disabled
+		iowr I[$r8 + 0x200] $r14	// MMCTX_MULTI_STRIDE
+		iowr I[$r8 + 0x300] $r15	// MMCTX_MULTI_MASK
+		bset $r9 1			// MULTI_EN
+	mmctx_multi_disabled:
+	add b32 $r8 0x100
+
+	xbit $r11 $r10 0
+	shl b32 $r11 16			// DIR
+	bset $r11 12			// QLIMIT = 0x10
+	xbit $r14 $r10 1
+	shl b32 $r14 17
+	or $r11 $r14			// START_TRIGGER
+	iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
+
+	// loop over the mmio list, and send requests to the hw
+	mmctx_exec_loop:
+		// wait for space in mmctx queue
+		mmctx_wait_free:
+			iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+			and $r14 0x1f
+			bra e #mmctx_wait_free
+
+		// queue up an entry
+		ld b32 $r14 D[$r12]
+		or $r14 $r9
+		iowr I[$r8 + 0x300] $r14
+		add b32 $r12 4
+		cmpu b32 $r12 $r13
+		bra ne #mmctx_exec_loop
+
+	xbit $r11 $r10 2
+	bra ne #mmctx_stop
+		// wait for queue to empty
+		mmctx_fini_wait:
+			iord $r11 I[$r8 + 0x000]	// MMCTX_CTRL
+			and $r11 0x1f
+			cmpu b32 $r11 0x10
+			bra ne #mmctx_fini_wait
+		mov $r10 2				// DONE_MMCTX
+		call #wait_donez
+		bra #mmctx_done
+	mmctx_stop:
+		xbit $r11 $r10 0
+		shl b32 $r11 16			// DIR
+		bset $r11 12			// QLIMIT = 0x10
+		bset $r11 18			// STOP_TRIGGER
+		iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
+		mmctx_stop_wait:
+			// wait for STOP_TRIGGER to clear
+			iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+			xbit $r11 $r11 18
+			bra ne #mmctx_stop_wait
+	mmctx_done:
+	trace_clr(T_MMCTX)
+	ret
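
For reference, the MMCTX_CTRL values written above are packed from the $r10 flags documented at the top of the routine: flag bit 0 becomes the direction at bit 16, QLIMIT is fixed at bit 12, and the first-transfer flag sets START_TRIGGER (bit 17) while the stop path sets STOP_TRIGGER (bit 18) instead. A small C rendering of that packing (illustrative only; the function name is an assumption of this sketch):

#include <stdint.h>

static uint32_t model_mmctx_ctrl(uint32_t flags, int stop)
{
	uint32_t ctrl = 0;

	ctrl |= (flags & 1) << 16;			/* DIR: 0 = save, 1 = load */
	ctrl |= 1u << 12;				/* QLIMIT = 0x10 */
	if (stop)
		ctrl |= 1u << 18;			/* STOP_TRIGGER */
	else
		ctrl |= ((flags >> 1) & 1) << 17;	/* START_TRIGGER, first xfer only */
	return ctrl;
}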
+
+// Wait for DONE_STRAND
+//
+strand_wait:
+	push $r10
+	mov $r10 2
+	call #wait_donez
+	pop $r10
+	ret
+
+// unknown - call before issuing strand commands
+//
+strand_pre:
+	mov $r8 0x4afc
+	sethi $r8 0x20000
+	mov $r9 0xc
+	iowr I[$r8] $r9
+	call #strand_wait
+	ret
+
+// unknown - call after issuing strand commands
+//
+strand_post:
+	mov $r8 0x4afc
+	sethi $r8 0x20000
+	mov $r9 0xd
+	iowr I[$r8] $r9
+	call #strand_wait
+	ret
+
+// Selects strand set?!
+//
+// In: $r14 id
+//
+strand_set:
+	mov $r10 0x4ffc
+	sethi $r10 0x20000
+	sub b32 $r11 $r10 0x500
+	mov $r12 0xf
+	iowr I[$r10 + 0x000] $r12		// 0x93c = 0xf
+	mov $r12 0xb
+	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xb
+	call #strand_wait
+	iowr I[$r10 + 0x000] $r14		// 0x93c = <id>
+	mov $r12 0xa
+	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xa
+	call #strand_wait
+	ret
+
+// Initialise strand context data
+//
+// In : $r15 context base
+// Out: $r15 context size (in bytes)
+//
+// Strandset(?) 3 hardcoded currently
+//
+strand_ctx_init:
+	trace_set(T_STRINIT)
+	call #strand_pre
+	mov $r14 3
+	call #strand_set
+	mov $r10 0x46fc
+	sethi $r10 0x20000
+	add b32 $r11 $r10 0x400
+	iowr I[$r10 + 0x100] $r0	// STRAND_FIRST_GENE = 0
+	mov $r12 1
+	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_FIRST_GENE
+	call #strand_wait
+	sub b32 $r12 $r0 1
+	iowr I[$r10 + 0x000] $r12	// STRAND_GENE_CNT = 0xffffffff
+	mov $r12 2
+	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_GENE_CNT
+	call #strand_wait
+	call #strand_post
+
+	// read the size of each strand and poke the context offset of
+	// each into STRAND_{SAVE,LOAD}_SWBASE now, so we don't need to
+	// worry about it later.
+	mov $r8 0x880
+	shl b32 $r8 6
+	iord $r9 I[$r8 + 0x000]		// STRANDS
+	add b32 $r8 0x2200
+	shr b32 $r14 $r15 8
+	ctx_init_strand_loop:
+		iowr I[$r8 + 0x000] $r14	// STRAND_SAVE_SWBASE
+		iowr I[$r8 + 0x100] $r14	// STRAND_LOAD_SWBASE
+		iord $r10 I[$r8 + 0x200]	// STRAND_SIZE
+		shr b32 $r10 6
+		add b32 $r10 1
+		add b32 $r14 $r10
+		add b32 $r8 4
+		sub b32 $r9 1
+		bra ne #ctx_init_strand_loop
+
+	shl b32 $r14 8
+	sub b32 $r15 $r14 $r15
+	trace_clr(T_STRINIT)
+	ret
+')
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
new file mode 100644
index 000000000000..618528248457
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -0,0 +1,1387 @@
+/*
+ * Copyright 2007 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#include "regs.h"
+
+static u32
+nv04_graph_ctx_regs[] = {
+	0x0040053c,
+	0x00400544,
+	0x00400540,
+	0x00400548,
+	NV04_PGRAPH_CTX_SWITCH1,
+	NV04_PGRAPH_CTX_SWITCH2,
+	NV04_PGRAPH_CTX_SWITCH3,
+	NV04_PGRAPH_CTX_SWITCH4,
+	NV04_PGRAPH_CTX_CACHE1,
+	NV04_PGRAPH_CTX_CACHE2,
+	NV04_PGRAPH_CTX_CACHE3,
+	NV04_PGRAPH_CTX_CACHE4,
+	0x00400184,
+	0x004001a4,
+	0x004001c4,
+	0x004001e4,
+	0x00400188,
+	0x004001a8,
+	0x004001c8,
+	0x004001e8,
+	0x0040018c,
+	0x004001ac,
+	0x004001cc,
+	0x004001ec,
+	0x00400190,
+	0x004001b0,
+	0x004001d0,
+	0x004001f0,
+	0x00400194,
+	0x004001b4,
+	0x004001d4,
+	0x004001f4,
+	0x00400198,
+	0x004001b8,
+	0x004001d8,
+	0x004001f8,
+	0x0040019c,
+	0x004001bc,
+	0x004001dc,
+	0x004001fc,
+	0x00400174,
+	NV04_PGRAPH_DMA_START_0,
+	NV04_PGRAPH_DMA_START_1,
+	NV04_PGRAPH_DMA_LENGTH,
+	NV04_PGRAPH_DMA_MISC,
+	NV04_PGRAPH_DMA_PITCH,
+	NV04_PGRAPH_BOFFSET0,
+	NV04_PGRAPH_BBASE0,
+	NV04_PGRAPH_BLIMIT0,
+	NV04_PGRAPH_BOFFSET1,
+	NV04_PGRAPH_BBASE1,
+	NV04_PGRAPH_BLIMIT1,
+	NV04_PGRAPH_BOFFSET2,
+	NV04_PGRAPH_BBASE2,
+	NV04_PGRAPH_BLIMIT2,
+	NV04_PGRAPH_BOFFSET3,
+	NV04_PGRAPH_BBASE3,
+	NV04_PGRAPH_BLIMIT3,
+	NV04_PGRAPH_BOFFSET4,
+	NV04_PGRAPH_BBASE4,
+	NV04_PGRAPH_BLIMIT4,
+	NV04_PGRAPH_BOFFSET5,
+	NV04_PGRAPH_BBASE5,
+	NV04_PGRAPH_BLIMIT5,
+	NV04_PGRAPH_BPITCH0,
+	NV04_PGRAPH_BPITCH1,
+	NV04_PGRAPH_BPITCH2,
+	NV04_PGRAPH_BPITCH3,
+	NV04_PGRAPH_BPITCH4,
+	NV04_PGRAPH_SURFACE,
+	NV04_PGRAPH_STATE,
+	NV04_PGRAPH_BSWIZZLE2,
+	NV04_PGRAPH_BSWIZZLE5,
+	NV04_PGRAPH_BPIXEL,
+	NV04_PGRAPH_NOTIFY,
+	NV04_PGRAPH_PATT_COLOR0,
+	NV04_PGRAPH_PATT_COLOR1,
+	NV04_PGRAPH_PATT_COLORRAM+0x00,
+	NV04_PGRAPH_PATT_COLORRAM+0x04,
+	NV04_PGRAPH_PATT_COLORRAM+0x08,
+	NV04_PGRAPH_PATT_COLORRAM+0x0c,
+	NV04_PGRAPH_PATT_COLORRAM+0x10,
+	NV04_PGRAPH_PATT_COLORRAM+0x14,
+	NV04_PGRAPH_PATT_COLORRAM+0x18,
+	NV04_PGRAPH_PATT_COLORRAM+0x1c,
+	NV04_PGRAPH_PATT_COLORRAM+0x20,
+	NV04_PGRAPH_PATT_COLORRAM+0x24,
+	NV04_PGRAPH_PATT_COLORRAM+0x28,
+	NV04_PGRAPH_PATT_COLORRAM+0x2c,
+	NV04_PGRAPH_PATT_COLORRAM+0x30,
+	NV04_PGRAPH_PATT_COLORRAM+0x34,
+	NV04_PGRAPH_PATT_COLORRAM+0x38,
+	NV04_PGRAPH_PATT_COLORRAM+0x3c,
+	NV04_PGRAPH_PATT_COLORRAM+0x40,
+	NV04_PGRAPH_PATT_COLORRAM+0x44,
+	NV04_PGRAPH_PATT_COLORRAM+0x48,
+	NV04_PGRAPH_PATT_COLORRAM+0x4c,
+	NV04_PGRAPH_PATT_COLORRAM+0x50,
+	NV04_PGRAPH_PATT_COLORRAM+0x54,
+	NV04_PGRAPH_PATT_COLORRAM+0x58,
+	NV04_PGRAPH_PATT_COLORRAM+0x5c,
+	NV04_PGRAPH_PATT_COLORRAM+0x60,
+	NV04_PGRAPH_PATT_COLORRAM+0x64,
+	NV04_PGRAPH_PATT_COLORRAM+0x68,
+	NV04_PGRAPH_PATT_COLORRAM+0x6c,
+	NV04_PGRAPH_PATT_COLORRAM+0x70,
+	NV04_PGRAPH_PATT_COLORRAM+0x74,
+	NV04_PGRAPH_PATT_COLORRAM+0x78,
+	NV04_PGRAPH_PATT_COLORRAM+0x7c,
+	NV04_PGRAPH_PATT_COLORRAM+0x80,
+	NV04_PGRAPH_PATT_COLORRAM+0x84,
+	NV04_PGRAPH_PATT_COLORRAM+0x88,
+	NV04_PGRAPH_PATT_COLORRAM+0x8c,
+	NV04_PGRAPH_PATT_COLORRAM+0x90,
+	NV04_PGRAPH_PATT_COLORRAM+0x94,
+	NV04_PGRAPH_PATT_COLORRAM+0x98,
+	NV04_PGRAPH_PATT_COLORRAM+0x9c,
+	NV04_PGRAPH_PATT_COLORRAM+0xa0,
+	NV04_PGRAPH_PATT_COLORRAM+0xa4,
+	NV04_PGRAPH_PATT_COLORRAM+0xa8,
+	NV04_PGRAPH_PATT_COLORRAM+0xac,
+	NV04_PGRAPH_PATT_COLORRAM+0xb0,
+	NV04_PGRAPH_PATT_COLORRAM+0xb4,
+	NV04_PGRAPH_PATT_COLORRAM+0xb8,
+	NV04_PGRAPH_PATT_COLORRAM+0xbc,
+	NV04_PGRAPH_PATT_COLORRAM+0xc0,
+	NV04_PGRAPH_PATT_COLORRAM+0xc4,
+	NV04_PGRAPH_PATT_COLORRAM+0xc8,
+	NV04_PGRAPH_PATT_COLORRAM+0xcc,
+	NV04_PGRAPH_PATT_COLORRAM+0xd0,
+	NV04_PGRAPH_PATT_COLORRAM+0xd4,
+	NV04_PGRAPH_PATT_COLORRAM+0xd8,
+	NV04_PGRAPH_PATT_COLORRAM+0xdc,
+	NV04_PGRAPH_PATT_COLORRAM+0xe0,
+	NV04_PGRAPH_PATT_COLORRAM+0xe4,
+	NV04_PGRAPH_PATT_COLORRAM+0xe8,
+	NV04_PGRAPH_PATT_COLORRAM+0xec,
+	NV04_PGRAPH_PATT_COLORRAM+0xf0,
+	NV04_PGRAPH_PATT_COLORRAM+0xf4,
+	NV04_PGRAPH_PATT_COLORRAM+0xf8,
+	NV04_PGRAPH_PATT_COLORRAM+0xfc,
+	NV04_PGRAPH_PATTERN,
+	0x0040080c,
+	NV04_PGRAPH_PATTERN_SHAPE,
+	0x00400600,
+	NV04_PGRAPH_ROP3,
+	NV04_PGRAPH_CHROMA,
+	NV04_PGRAPH_BETA_AND,
+	NV04_PGRAPH_BETA_PREMULT,
+	NV04_PGRAPH_CONTROL0,
+	NV04_PGRAPH_CONTROL1,
+	NV04_PGRAPH_CONTROL2,
+	NV04_PGRAPH_BLEND,
+	NV04_PGRAPH_STORED_FMT,
+	NV04_PGRAPH_SOURCE_COLOR,
+	0x00400560,
+	0x00400568,
+	0x00400564,
+	0x0040056c,
+	0x00400400,
+	0x00400480,
+	0x00400404,
+	0x00400484,
+	0x00400408,
+	0x00400488,
+	0x0040040c,
+	0x0040048c,
+	0x00400410,
+	0x00400490,
+	0x00400414,
+	0x00400494,
+	0x00400418,
+	0x00400498,
+	0x0040041c,
+	0x0040049c,
+	0x00400420,
+	0x004004a0,
+	0x00400424,
+	0x004004a4,
+	0x00400428,
+	0x004004a8,
+	0x0040042c,
+	0x004004ac,
+	0x00400430,
+	0x004004b0,
+	0x00400434,
+	0x004004b4,
+	0x00400438,
+	0x004004b8,
+	0x0040043c,
+	0x004004bc,
+	0x00400440,
+	0x004004c0,
+	0x00400444,
+	0x004004c4,
+	0x00400448,
+	0x004004c8,
+	0x0040044c,
+	0x004004cc,
+	0x00400450,
+	0x004004d0,
+	0x00400454,
+	0x004004d4,
+	0x00400458,
+	0x004004d8,
+	0x0040045c,
+	0x004004dc,
+	0x00400460,
+	0x004004e0,
+	0x00400464,
+	0x004004e4,
+	0x00400468,
+	0x004004e8,
+	0x0040046c,
+	0x004004ec,
+	0x00400470,
+	0x004004f0,
+	0x00400474,
+	0x004004f4,
+	0x00400478,
+	0x004004f8,
+	0x0040047c,
+	0x004004fc,
+	0x00400534,
+	0x00400538,
+	0x00400514,
+	0x00400518,
+	0x0040051c,
+	0x00400520,
+	0x00400524,
+	0x00400528,
+	0x0040052c,
+	0x00400530,
+	0x00400d00,
+	0x00400d40,
+	0x00400d80,
+	0x00400d04,
+	0x00400d44,
+	0x00400d84,
+	0x00400d08,
+	0x00400d48,
+	0x00400d88,
+	0x00400d0c,
+	0x00400d4c,
+	0x00400d8c,
+	0x00400d10,
+	0x00400d50,
+	0x00400d90,
+	0x00400d14,
+	0x00400d54,
+	0x00400d94,
+	0x00400d18,
+	0x00400d58,
+	0x00400d98,
+	0x00400d1c,
+	0x00400d5c,
+	0x00400d9c,
+	0x00400d20,
+	0x00400d60,
+	0x00400da0,
+	0x00400d24,
+	0x00400d64,
+	0x00400da4,
+	0x00400d28,
+	0x00400d68,
+	0x00400da8,
+	0x00400d2c,
+	0x00400d6c,
+	0x00400dac,
+	0x00400d30,
+	0x00400d70,
+	0x00400db0,
+	0x00400d34,
+	0x00400d74,
+	0x00400db4,
+	0x00400d38,
+	0x00400d78,
+	0x00400db8,
+	0x00400d3c,
+	0x00400d7c,
+	0x00400dbc,
+	0x00400590,
+	0x00400594,
+	0x00400598,
+	0x0040059c,
+	0x004005a8,
+	0x004005ac,
+	0x004005b0,
+	0x004005b4,
+	0x004005c0,
+	0x004005c4,
+	0x004005c8,
+	0x004005cc,
+	0x004005d0,
+	0x004005d4,
+	0x004005d8,
+	0x004005dc,
+	0x004005e0,
+	NV04_PGRAPH_PASSTHRU_0,
+	NV04_PGRAPH_PASSTHRU_1,
+	NV04_PGRAPH_PASSTHRU_2,
+	NV04_PGRAPH_DVD_COLORFMT,
+	NV04_PGRAPH_SCALED_FORMAT,
+	NV04_PGRAPH_MISC24_0,
+	NV04_PGRAPH_MISC24_1,
+	NV04_PGRAPH_MISC24_2,
+	0x00400500,
+	0x00400504,
+	NV04_PGRAPH_VALID1,
+	NV04_PGRAPH_VALID2,
+	NV04_PGRAPH_DEBUG_3
+};
+
+struct nv04_graph_priv {
+	struct nouveau_graph base;
+	struct nv04_graph_chan *chan[16];
+	spinlock_t lock;
+};
+
+struct nv04_graph_chan {
+	struct nouveau_object base;
+	int chid;
+	u32 nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+};
+
+
+static inline struct nv04_graph_priv *
+nv04_graph_priv(struct nv04_graph_chan *chan)
+{
+	return (void *)nv_object(chan)->engine;
+}
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ *  - bits 0-7: class
+ *  - bit 12: color key active
+ *  - bit 13: clip rect active
+ *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ *            NV03_CONTEXT_SURFACE_DST].
+ *  - bits 15-17: 2d operation [aka patch config]
+ *  - bit 24: patch valid [enables rendering using this object]
+ *  - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ *  - bits 0-1: mono format
+ *  - bits 8-13: color format
+ *  - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ *  - bits 0-15: DMA_A instance
+ *  - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ *  - bits 0-7: class
+ *  - bit 12: color key active
+ *  - bit 13: clip rect active
+ *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ *            NV03_CONTEXT_SURFACE_DST].
+ *  - bits 15-17: 2d operation [aka patch config]
+ *  - bits 20-22: dither mode
+ *  - bit 24: patch valid [enables rendering using this object]
+ *  - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ *  - bit 26: surface_src/surface_zeta valid
+ *  - bit 27: pattern valid
+ *  - bit 28: rop valid
+ *  - bit 29: beta1 valid
+ *  - bit 30: beta4 valid
+ * word 1:
+ *  - bits 0-1: mono format
+ *  - bits 8-13: color format
+ *  - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ *  - bits 0-15: DMA_A instance
+ *  - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with an object of the proper type, or with the NULL
+ * type. It'll only allow rendering using the grobj if all needed objects
+ * are bound. The needed set of objects depends on the selected operation:
+ * for example a rop object is needed by ROP_AND, but not by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04
+ * but the last word isn't actually used for anything, we abuse it for
+ * this purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
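
To make the bit layout above easier to follow, here is a hedged C sketch that decodes the NV04 variant of word 0; the struct and helper are purely illustrative and do not exist in the driver:

#include <stdint.h>
#include <stdbool.h>

/* illustrative decode of NV04 grobj word 0, per the layout above */
struct nv04_grobj_word0 {
	uint8_t	cls;		/* bits 0-7: object class */
	bool	chroma;		/* bit 12: color key active */
	bool	clip;		/* bit 13: clip rect active */
	bool	swizzled;	/* bit 14: destination is the swizzled surface */
	uint8_t	op;		/* bits 15-17: 2d operation (patch config) */
	bool	patch_valid;	/* bit 24: rendering with this object allowed */
	bool	surf3d_valid;	/* bit 25: tex_tri/multitex_tri only */
};

static struct nv04_grobj_word0 decode_word0(uint32_t w)
{
	struct nv04_grobj_word0 g = {
		.cls		= w & 0xff,
		.chroma		= w & (1 << 12),
		.clip		= w & (1 << 13),
		.swizzled	= w & (1 << 14),
		.op		= (w >> 15) & 7,
		.patch_valid	= w & (1 << 24),
		.surf3d_valid	= w & (1 << 25),
	};
	return g;
}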
+
+static void
+nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+	u32 tmp;
+
+	tmp  = nv_ro32(object, 0x00);
+	tmp &= ~mask;
+	tmp |= value;
+	nv_wo32(object, 0x00, tmp);
+
+	nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
+	nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value)
+{
+	int class, op, valid = 1;
+	u32 tmp, ctx1;
+
+	ctx1 = nv_ro32(object, 0x00);
+	class = ctx1 & 0xff;
+	op = (ctx1 >> 15) & 7;
+
+	tmp = nv_ro32(object, 0x0c);
+	tmp &= ~mask;
+	tmp |= value;
+	nv_wo32(object, 0x0c, tmp);
+
+	/* check for valid surf2d/surf_dst/surf_color */
+	if (!(tmp & 0x02000000))
+		valid = 0;
+	/* check for valid surf_src/surf_zeta */
+	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+		valid = 0;
+
+	switch (op) {
+	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
+	case 0:
+	case 3:
+		break;
+	/* ROP_AND: requires pattern and rop */
+	case 1:
+		if (!(tmp & 0x18000000))
+			valid = 0;
+		break;
+	/* BLEND_AND: requires beta1 */
+	case 2:
+		if (!(tmp & 0x20000000))
+			valid = 0;
+		break;
+	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+	case 4:
+	case 5:
+		if (!(tmp & 0x40000000))
+			valid = 0;
+		break;
+	}
+
+	nv04_graph_set_ctx1(object, 0x01000000, valid << 24);
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	u32 class = nv_ro32(object, 0) & 0xff;
+	u32 data = *(u32 *)args;
+	if (data > 5)
+		return 1;
+	/* Old versions of the objects only accept the first three operations. */
+	if (data > 2 && class < 0x40)
+		return 1;
+	nv04_graph_set_ctx1(object, 0x00038000, data << 15);
+	/* changing operation changes set of objects needed for validation */
+	nv04_graph_set_ctx_val(object, 0, 0);
+	return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	u32 data = *(u32 *)args;
+	u32 min = data & 0xffff, max;
+	u32 w = data >> 16;
+	if (min & 0x8000)
+		/* too large */
+		return 1;
+	if (w & 0x8000)
+		/* yes, it accepts negative values for some reason. */
+		w |= 0xffff0000;
+	max = min + w;
+	max &= 0x3ffff;
+	nv_wr32(priv, 0x40053c, min);
+	nv_wr32(priv, 0x400544, max);
+	return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	u32 data = *(u32 *)args;
+	u32 min = data & 0xffff, max;
+	u32 w = data >> 16;
+	if (min & 0x8000)
+		/* too large */
+		return 1;
+	if (w & 0x8000)
+		/* yes, it accepts negative values for some reason. */
+		w |= 0xffff0000;
+	max = min + w;
+	max &= 0x3ffff;
+	nv_wr32(priv, 0x400540, min);
+	nv_wr32(priv, 0x400548, max);
+	return 0;
+}
+
+static u16
+nv04_graph_mthd_bind_class(struct nouveau_object *object, u32 *args, u32 size)
+{
+	struct nouveau_instmem *imem = nouveau_instmem(object);
+	u32 inst = *(u32 *)args << 4;
+	return nv_ro32(imem, inst);
+}
+
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_object *object, u32 mthd,
+			    void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx1(object, 0x00004000, 0);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0);
+		return 0;
+	case 0x42:
+		nv04_graph_set_ctx1(object, 0x00004000, 0);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_object *object, u32 mthd,
+				    void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx1(object, 0x00004000, 0);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0);
+		return 0;
+	case 0x42:
+		nv04_graph_set_ctx1(object, 0x00004000, 0);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	case 0x52:
+		nv04_graph_set_ctx1(object, 0x00004000, 0x00004000);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv01_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
+			  void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x08000000, 0);
+		return 0;
+	case 0x18:
+		nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
+			  void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x08000000, 0);
+		return 0;
+	case 0x44:
+		nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_object *object, u32 mthd,
+			 void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x10000000, 0);
+		return 0;
+	case 0x43:
+		nv04_graph_set_ctx_val(object, 0x10000000, 0x10000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x20000000, 0);
+		return 0;
+	case 0x12:
+		nv04_graph_set_ctx_val(object, 0x20000000, 0x20000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x40000000, 0);
+		return 0;
+	case 0x72:
+		nv04_graph_set_ctx_val(object, 0x40000000, 0x40000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x02000000, 0);
+		return 0;
+	case 0x58:
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x04000000, 0);
+		return 0;
+	case 0x59:
+		nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_object *object, u32 mthd,
+				void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x02000000, 0);
+		return 0;
+	case 0x5a:
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_object *object, u32 mthd,
+			       void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x04000000, 0);
+		return 0;
+	case 0x5b:
+		nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv01_graph_mthd_bind_clip(struct nouveau_object *object, u32 mthd,
+			  void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx1(object, 0x2000, 0);
+		return 0;
+	case 0x19:
+		nv04_graph_set_ctx1(object, 0x2000, 0x2000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
+			    void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx1(object, 0x1000, 0);
+		return 0;
+	/* Yes, for some reason even the old versions of the objects
+	 * accept 0x57 and not 0x17. Consistency be damned.
+	 */
+	case 0x57:
+		nv04_graph_set_ctx1(object, 0x1000, 0x1000);
+		return 0;
+	}
+	return 1;
+}
+
+static struct nouveau_omthds
+nv03_graph_gdi_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_patt },
+	{ 0x0188, nv04_graph_mthd_bind_rop },
+	{ 0x018c, nv04_graph_mthd_bind_beta1 },
+	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_gdi_omthds[] = {
+	{ 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv01_graph_blit_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, nv01_graph_mthd_bind_patt },
+	{ 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
+	{ 0x019c, nv04_graph_mthd_bind_surf_src },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_blit_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, nv04_graph_mthd_bind_patt },
+	{ 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, nv04_graph_mthd_bind_beta4 },
+	{ 0x019c, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_iifc_omthds[] = {
+	{ 0x0188, nv01_graph_mthd_bind_chroma },
+	{ 0x018c, nv01_graph_mthd_bind_clip },
+	{ 0x0190, nv04_graph_mthd_bind_patt },
+	{ 0x0194, nv04_graph_mthd_bind_rop },
+	{ 0x0198, nv04_graph_mthd_bind_beta1 },
+	{ 0x019c, nv04_graph_mthd_bind_beta4 },
+	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+	{ 0x03e4, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv01_graph_ifc_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, nv01_graph_mthd_bind_patt },
+	{ 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_ifc_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, nv04_graph_mthd_bind_patt },
+	{ 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, nv04_graph_mthd_bind_beta4 },
+	{ 0x019c, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv03_graph_sifc_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_sifc_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv03_graph_sifm_omthds[] = {
+	{ 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x0304, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_sifm_omthds[] = {
+	{ 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x0304, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_surf3d_omthds[] = {
+	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+	{}
+};
+
+static struct nouveau_omthds
+nv03_graph_ttri_omthds[] = {
+	{ 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, nv04_graph_mthd_bind_surf_color },
+	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
+	{}
+};
+
+static struct nouveau_omthds
+nv01_graph_prim_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_clip },
+	{ 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_prim_omthds[] = {
+	{ 0x0184, nv01_graph_mthd_bind_clip },
+	{ 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static int
+nv04_graph_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+#ifdef __BIG_ENDIAN
+	nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+#endif
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv04_graph_ofuncs = {
+	.ctor = nv04_graph_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv04_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0017, &nv04_graph_ofuncs }, /* chroma */
+	{ 0x0018, &nv04_graph_ofuncs }, /* pattern (nv01) */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x001c, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* line */
+	{ 0x001d, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* tri */
+	{ 0x001e, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* rect */
+	{ 0x001f, &nv04_graph_ofuncs, nv01_graph_blit_omthds },
+	{ 0x0021, &nv04_graph_ofuncs, nv01_graph_ifc_omthds },
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0036, &nv04_graph_ofuncs, nv03_graph_sifc_omthds },
+	{ 0x0037, &nv04_graph_ofuncs, nv03_graph_sifm_omthds },
+	{ 0x0038, &nv04_graph_ofuncs }, /* dvd subpicture */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0042, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x0048, &nv04_graph_ofuncs, nv03_graph_ttri_omthds },
+	{ 0x004a, &nv04_graph_ofuncs, nv04_graph_gdi_omthds },
+	{ 0x004b, &nv04_graph_ofuncs, nv03_graph_gdi_omthds },
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x0053, &nv04_graph_ofuncs, nv04_graph_surf3d_omthds },
+	{ 0x0054, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0055, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0057, &nv04_graph_ofuncs }, /* chroma */
+	{ 0x0058, &nv04_graph_ofuncs }, /* surf_dst */
+	{ 0x0059, &nv04_graph_ofuncs }, /* surf_src */
+	{ 0x005a, &nv04_graph_ofuncs }, /* surf_color */
+	{ 0x005b, &nv04_graph_ofuncs }, /* surf_zeta */
+	{ 0x005c, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* line */
+	{ 0x005d, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* tri */
+	{ 0x005e, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* rect */
+	{ 0x005f, &nv04_graph_ofuncs, nv04_graph_blit_omthds },
+	{ 0x0060, &nv04_graph_ofuncs, nv04_graph_iifc_omthds },
+	{ 0x0061, &nv04_graph_ofuncs, nv04_graph_ifc_omthds },
+	{ 0x0064, &nv04_graph_ofuncs }, /* iifc (nv05) */
+	{ 0x0065, &nv04_graph_ofuncs }, /* ifc (nv05) */
+	{ 0x0066, &nv04_graph_ofuncs }, /* sifc (nv05) */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0076, &nv04_graph_ofuncs, nv04_graph_sifc_omthds },
+	{ 0x0077, &nv04_graph_ofuncs, nv04_graph_sifm_omthds },
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nv04_graph_chan *
+nv04_graph_channel(struct nv04_graph_priv *priv)
+{
+	struct nv04_graph_chan *chan = NULL;
+	if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
+		int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
+		if (chid < ARRAY_SIZE(priv->chan))
+			chan = priv->chan[chid];
+	}
+	return chan;
+}
+
+static int
+nv04_graph_load_context(struct nv04_graph_chan *chan, int chid)
+{
+	struct nv04_graph_priv *priv = nv04_graph_priv(chan);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+		nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]);
+
+	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
+	return 0;
+}
+
+static int
+nv04_graph_unload_context(struct nv04_graph_chan *chan)
+{
+	struct nv04_graph_priv *priv = nv04_graph_priv(chan);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+		chan->nv04[i] = nv_rd32(priv, nv04_graph_ctx_regs[i]);
+
+	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+	return 0;
+}
+
+static void
+nv04_graph_context_switch(struct nv04_graph_priv *priv)
+{
+	struct nv04_graph_chan *prev = NULL;
+	struct nv04_graph_chan *next = NULL;
+	unsigned long flags;
+	int chid;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv04_graph_idle(priv);
+
+	/* If previous context is valid, we need to save it */
+	prev = nv04_graph_channel(priv);
+	if (prev)
+		nv04_graph_unload_context(prev);
+
+	/* load context for next channel */
+	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
+	next = priv->chan[chid];
+	if (next)
+		nv04_graph_load_context(next, chid);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
+		if (nv04_graph_ctx_regs[i] == reg)
+			return &chan->nv04[i];
+	}
+
+	return NULL;
+}
+
+static int
+nv04_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_fifo_chan *fifo = (void *)parent;
+	struct nv04_graph_priv *priv = (void *)engine;
+	struct nv04_graph_chan *chan;
+	unsigned long flags;
+	int ret;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->chan[fifo->chid]) {
+		*pobject = nv_object(priv->chan[fifo->chid]);
+		atomic_inc(&(*pobject)->refcount);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		nouveau_object_destroy(&chan->base);
+		return 1;
+	}
+
+	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
+	priv->chan[fifo->chid] = chan;
+	chan->chid = fifo->chid;
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+}
+
+static void
+nv04_graph_context_dtor(struct nouveau_object *object)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	struct nv04_graph_chan *chan = (void *)object;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	nouveau_object_destroy(&chan->base);
+}
+
+static int
+nv04_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	struct nv04_graph_chan *chan = (void *)object;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv04_graph_channel(priv) == chan)
+		nv04_graph_unload_context(chan);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return nouveau_object_fini(&chan->base, suspend);
+}
+
+static struct nouveau_oclass
+nv04_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_graph_context_ctor,
+		.dtor = nv04_graph_context_dtor,
+		.init = nouveau_object_init,
+		.fini = nv04_graph_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+bool
+nv04_graph_idle(void *obj)
+{
+	struct nouveau_graph *graph = nouveau_graph(obj);
+	u32 mask = 0xffffffff;
+
+	if (nv_device(obj)->card_type == NV_40)
+		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+	if (!nv_wait(graph, NV04_PGRAPH_STATUS, mask, 0)) {
+		nv_error(graph, "idle timed out with status 0x%08x\n",
+			 nv_rd32(graph, NV04_PGRAPH_STATUS));
+		return false;
+	}
+
+	return true;
+}
+
+static const struct nouveau_bitfield
+nv04_graph_intr_name[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{}
+};
+
+static const struct nouveau_bitfield
+nv04_graph_nstatus[] = {
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
+	{}
+};
+
+const struct nouveau_bitfield
+nv04_graph_nsource[] = {
+	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
+	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
+	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
+	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
+	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
+	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
+	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
+	{}
+};
+
+static void
+nv04_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nv04_graph_priv *priv = (void *)subdev;
+	struct nv04_graph_chan *chan = NULL;
+	struct nouveau_namedb *namedb = NULL;
+	struct nouveau_handle *handle = NULL;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 chid = (addr & 0x0f000000) >> 24;
+	u32 subc = (addr & 0x0000e000) >> 13;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
+	u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
+	u32 show = stat;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	chan = priv->chan[chid];
+	if (chan)
+		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (stat & NV_PGRAPH_INTR_NOTIFY) {
+		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
+			handle = nouveau_namedb_get_vinst(namedb, inst);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_NOTIFY;
+		}
+	}
+
+	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		nv04_graph_context_switch(priv);
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+	if (show) {
+		nv_error(priv, "");
+		nouveau_bitfield_print(nv04_graph_intr_name, show);
+		printk(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		printk(" nstatus:");
+		nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+		printk("\n");
+		nv_error(priv, "ch %d/%d class 0x%04x "
+			       "mthd 0x%04x data 0x%08x\n",
+			 chid, subc, class, mthd, data);
+	}
+
+	nouveau_namedb_put(handle);
+}
+
+static int
+nv04_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv04_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv04_graph_intr;
+	nv_engine(priv)->cclass = &nv04_graph_cclass;
+	nv_engine(priv)->sclass = nv04_graph_sclass;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static int
+nv04_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nv04_graph_priv *priv = (void *)engine;
+	int ret;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* Enable PGRAPH interrupts */
+	nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
+	nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	/* 1231C000 blob, 001 haiku */
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	/* 0x72111100 blob, 01 haiku */
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/* haiku same */
+
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/* haiku and blob 10d4 */
+
+	nv_wr32(priv, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
+	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+
+	/* These don't belong here; they're part of a per-channel context */
+	nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_graph_ctor,
+		.dtor = _nouveau_graph_dtor,
+		.init = nv04_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
new file mode 100644
index 000000000000..92521c89e77f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -0,0 +1,1314 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#include "regs.h"
+
+struct pipe_state {
+	u32 pipe_0x0000[0x040/4];
+	u32 pipe_0x0040[0x010/4];
+	u32 pipe_0x0200[0x0c0/4];
+	u32 pipe_0x4400[0x080/4];
+	u32 pipe_0x6400[0x3b0/4];
+	u32 pipe_0x6800[0x2f0/4];
+	u32 pipe_0x6c00[0x030/4];
+	u32 pipe_0x7000[0x130/4];
+	u32 pipe_0x7400[0x0c0/4];
+	u32 pipe_0x7800[0x0c0/4];
+};
+
+static int nv10_graph_ctx_regs[] = {
+	NV10_PGRAPH_CTX_SWITCH(0),
+	NV10_PGRAPH_CTX_SWITCH(1),
+	NV10_PGRAPH_CTX_SWITCH(2),
+	NV10_PGRAPH_CTX_SWITCH(3),
+	NV10_PGRAPH_CTX_SWITCH(4),
+	NV10_PGRAPH_CTX_CACHE(0, 0),
+	NV10_PGRAPH_CTX_CACHE(0, 1),
+	NV10_PGRAPH_CTX_CACHE(0, 2),
+	NV10_PGRAPH_CTX_CACHE(0, 3),
+	NV10_PGRAPH_CTX_CACHE(0, 4),
+	NV10_PGRAPH_CTX_CACHE(1, 0),
+	NV10_PGRAPH_CTX_CACHE(1, 1),
+	NV10_PGRAPH_CTX_CACHE(1, 2),
+	NV10_PGRAPH_CTX_CACHE(1, 3),
+	NV10_PGRAPH_CTX_CACHE(1, 4),
+	NV10_PGRAPH_CTX_CACHE(2, 0),
+	NV10_PGRAPH_CTX_CACHE(2, 1),
+	NV10_PGRAPH_CTX_CACHE(2, 2),
+	NV10_PGRAPH_CTX_CACHE(2, 3),
+	NV10_PGRAPH_CTX_CACHE(2, 4),
+	NV10_PGRAPH_CTX_CACHE(3, 0),
+	NV10_PGRAPH_CTX_CACHE(3, 1),
+	NV10_PGRAPH_CTX_CACHE(3, 2),
+	NV10_PGRAPH_CTX_CACHE(3, 3),
+	NV10_PGRAPH_CTX_CACHE(3, 4),
+	NV10_PGRAPH_CTX_CACHE(4, 0),
+	NV10_PGRAPH_CTX_CACHE(4, 1),
+	NV10_PGRAPH_CTX_CACHE(4, 2),
+	NV10_PGRAPH_CTX_CACHE(4, 3),
+	NV10_PGRAPH_CTX_CACHE(4, 4),
+	NV10_PGRAPH_CTX_CACHE(5, 0),
+	NV10_PGRAPH_CTX_CACHE(5, 1),
+	NV10_PGRAPH_CTX_CACHE(5, 2),
+	NV10_PGRAPH_CTX_CACHE(5, 3),
+	NV10_PGRAPH_CTX_CACHE(5, 4),
+	NV10_PGRAPH_CTX_CACHE(6, 0),
+	NV10_PGRAPH_CTX_CACHE(6, 1),
+	NV10_PGRAPH_CTX_CACHE(6, 2),
+	NV10_PGRAPH_CTX_CACHE(6, 3),
+	NV10_PGRAPH_CTX_CACHE(6, 4),
+	NV10_PGRAPH_CTX_CACHE(7, 0),
+	NV10_PGRAPH_CTX_CACHE(7, 1),
+	NV10_PGRAPH_CTX_CACHE(7, 2),
+	NV10_PGRAPH_CTX_CACHE(7, 3),
+	NV10_PGRAPH_CTX_CACHE(7, 4),
+	NV10_PGRAPH_CTX_USER,
+	NV04_PGRAPH_DMA_START_0,
+	NV04_PGRAPH_DMA_START_1,
+	NV04_PGRAPH_DMA_LENGTH,
+	NV04_PGRAPH_DMA_MISC,
+	NV10_PGRAPH_DMA_PITCH,
+	NV04_PGRAPH_BOFFSET0,
+	NV04_PGRAPH_BBASE0,
+	NV04_PGRAPH_BLIMIT0,
+	NV04_PGRAPH_BOFFSET1,
+	NV04_PGRAPH_BBASE1,
+	NV04_PGRAPH_BLIMIT1,
+	NV04_PGRAPH_BOFFSET2,
+	NV04_PGRAPH_BBASE2,
+	NV04_PGRAPH_BLIMIT2,
+	NV04_PGRAPH_BOFFSET3,
+	NV04_PGRAPH_BBASE3,
+	NV04_PGRAPH_BLIMIT3,
+	NV04_PGRAPH_BOFFSET4,
+	NV04_PGRAPH_BBASE4,
+	NV04_PGRAPH_BLIMIT4,
+	NV04_PGRAPH_BOFFSET5,
+	NV04_PGRAPH_BBASE5,
+	NV04_PGRAPH_BLIMIT5,
+	NV04_PGRAPH_BPITCH0,
+	NV04_PGRAPH_BPITCH1,
+	NV04_PGRAPH_BPITCH2,
+	NV04_PGRAPH_BPITCH3,
+	NV04_PGRAPH_BPITCH4,
+	NV10_PGRAPH_SURFACE,
+	NV10_PGRAPH_STATE,
+	NV04_PGRAPH_BSWIZZLE2,
+	NV04_PGRAPH_BSWIZZLE5,
+	NV04_PGRAPH_BPIXEL,
+	NV10_PGRAPH_NOTIFY,
+	NV04_PGRAPH_PATT_COLOR0,
+	NV04_PGRAPH_PATT_COLOR1,
+	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
+	0x00400904,
+	0x00400908,
+	0x0040090c,
+	0x00400910,
+	0x00400914,
+	0x00400918,
+	0x0040091c,
+	0x00400920,
+	0x00400924,
+	0x00400928,
+	0x0040092c,
+	0x00400930,
+	0x00400934,
+	0x00400938,
+	0x0040093c,
+	0x00400940,
+	0x00400944,
+	0x00400948,
+	0x0040094c,
+	0x00400950,
+	0x00400954,
+	0x00400958,
+	0x0040095c,
+	0x00400960,
+	0x00400964,
+	0x00400968,
+	0x0040096c,
+	0x00400970,
+	0x00400974,
+	0x00400978,
+	0x0040097c,
+	0x00400980,
+	0x00400984,
+	0x00400988,
+	0x0040098c,
+	0x00400990,
+	0x00400994,
+	0x00400998,
+	0x0040099c,
+	0x004009a0,
+	0x004009a4,
+	0x004009a8,
+	0x004009ac,
+	0x004009b0,
+	0x004009b4,
+	0x004009b8,
+	0x004009bc,
+	0x004009c0,
+	0x004009c4,
+	0x004009c8,
+	0x004009cc,
+	0x004009d0,
+	0x004009d4,
+	0x004009d8,
+	0x004009dc,
+	0x004009e0,
+	0x004009e4,
+	0x004009e8,
+	0x004009ec,
+	0x004009f0,
+	0x004009f4,
+	0x004009f8,
+	0x004009fc,
+	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
+	0x0040080c,
+	NV04_PGRAPH_PATTERN_SHAPE,
+	NV03_PGRAPH_MONO_COLOR0,
+	NV04_PGRAPH_ROP3,
+	NV04_PGRAPH_CHROMA,
+	NV04_PGRAPH_BETA_AND,
+	NV04_PGRAPH_BETA_PREMULT,
+	0x00400e70,
+	0x00400e74,
+	0x00400e78,
+	0x00400e7c,
+	0x00400e80,
+	0x00400e84,
+	0x00400e88,
+	0x00400e8c,
+	0x00400ea0,
+	0x00400ea4,
+	0x00400ea8,
+	0x00400e90,
+	0x00400e94,
+	0x00400e98,
+	0x00400e9c,
+	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
+	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
+	0x00400f04,
+	0x00400f24,
+	0x00400f08,
+	0x00400f28,
+	0x00400f0c,
+	0x00400f2c,
+	0x00400f10,
+	0x00400f30,
+	0x00400f14,
+	0x00400f34,
+	0x00400f18,
+	0x00400f38,
+	0x00400f1c,
+	0x00400f3c,
+	NV10_PGRAPH_XFMODE0,
+	NV10_PGRAPH_XFMODE1,
+	NV10_PGRAPH_GLOBALSTATE0,
+	NV10_PGRAPH_GLOBALSTATE1,
+	NV04_PGRAPH_STORED_FMT,
+	NV04_PGRAPH_SOURCE_COLOR,
+	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
+	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
+	0x00400404,
+	0x00400484,
+	0x00400408,
+	0x00400488,
+	0x0040040c,
+	0x0040048c,
+	0x00400410,
+	0x00400490,
+	0x00400414,
+	0x00400494,
+	0x00400418,
+	0x00400498,
+	0x0040041c,
+	0x0040049c,
+	0x00400420,
+	0x004004a0,
+	0x00400424,
+	0x004004a4,
+	0x00400428,
+	0x004004a8,
+	0x0040042c,
+	0x004004ac,
+	0x00400430,
+	0x004004b0,
+	0x00400434,
+	0x004004b4,
+	0x00400438,
+	0x004004b8,
+	0x0040043c,
+	0x004004bc,
+	0x00400440,
+	0x004004c0,
+	0x00400444,
+	0x004004c4,
+	0x00400448,
+	0x004004c8,
+	0x0040044c,
+	0x004004cc,
+	0x00400450,
+	0x004004d0,
+	0x00400454,
+	0x004004d4,
+	0x00400458,
+	0x004004d8,
+	0x0040045c,
+	0x004004dc,
+	0x00400460,
+	0x004004e0,
+	0x00400464,
+	0x004004e4,
+	0x00400468,
+	0x004004e8,
+	0x0040046c,
+	0x004004ec,
+	0x00400470,
+	0x004004f0,
+	0x00400474,
+	0x004004f4,
+	0x00400478,
+	0x004004f8,
+	0x0040047c,
+	0x004004fc,
+	NV03_PGRAPH_ABS_UCLIP_XMIN,
+	NV03_PGRAPH_ABS_UCLIP_XMAX,
+	NV03_PGRAPH_ABS_UCLIP_YMIN,
+	NV03_PGRAPH_ABS_UCLIP_YMAX,
+	0x00400550,
+	0x00400558,
+	0x00400554,
+	0x0040055c,
+	NV03_PGRAPH_ABS_UCLIPA_XMIN,
+	NV03_PGRAPH_ABS_UCLIPA_XMAX,
+	NV03_PGRAPH_ABS_UCLIPA_YMIN,
+	NV03_PGRAPH_ABS_UCLIPA_YMAX,
+	NV03_PGRAPH_ABS_ICLIP_XMAX,
+	NV03_PGRAPH_ABS_ICLIP_YMAX,
+	NV03_PGRAPH_XY_LOGIC_MISC0,
+	NV03_PGRAPH_XY_LOGIC_MISC1,
+	NV03_PGRAPH_XY_LOGIC_MISC2,
+	NV03_PGRAPH_XY_LOGIC_MISC3,
+	NV03_PGRAPH_CLIPX_0,
+	NV03_PGRAPH_CLIPX_1,
+	NV03_PGRAPH_CLIPY_0,
+	NV03_PGRAPH_CLIPY_1,
+	NV10_PGRAPH_COMBINER0_IN_ALPHA,
+	NV10_PGRAPH_COMBINER1_IN_ALPHA,
+	NV10_PGRAPH_COMBINER0_IN_RGB,
+	NV10_PGRAPH_COMBINER1_IN_RGB,
+	NV10_PGRAPH_COMBINER_COLOR0,
+	NV10_PGRAPH_COMBINER_COLOR1,
+	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
+	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
+	NV10_PGRAPH_COMBINER0_OUT_RGB,
+	NV10_PGRAPH_COMBINER1_OUT_RGB,
+	NV10_PGRAPH_COMBINER_FINAL0,
+	NV10_PGRAPH_COMBINER_FINAL1,
+	0x00400e00,
+	0x00400e04,
+	0x00400e08,
+	0x00400e0c,
+	0x00400e10,
+	0x00400e14,
+	0x00400e18,
+	0x00400e1c,
+	0x00400e20,
+	0x00400e24,
+	0x00400e28,
+	0x00400e2c,
+	0x00400e30,
+	0x00400e34,
+	0x00400e38,
+	0x00400e3c,
+	NV04_PGRAPH_PASSTHRU_0,
+	NV04_PGRAPH_PASSTHRU_1,
+	NV04_PGRAPH_PASSTHRU_2,
+	NV10_PGRAPH_DIMX_TEXTURE,
+	NV10_PGRAPH_WDIMX_TEXTURE,
+	NV10_PGRAPH_DVD_COLORFMT,
+	NV10_PGRAPH_SCALED_FORMAT,
+	NV04_PGRAPH_MISC24_0,
+	NV04_PGRAPH_MISC24_1,
+	NV04_PGRAPH_MISC24_2,
+	NV03_PGRAPH_X_MISC,
+	NV03_PGRAPH_Y_MISC,
+	NV04_PGRAPH_VALID1,
+	NV04_PGRAPH_VALID2,
+};
+
+static int nv17_graph_ctx_regs[] = {
+	NV10_PGRAPH_DEBUG_4,
+	0x004006b0,
+	0x00400eac,
+	0x00400eb0,
+	0x00400eb4,
+	0x00400eb8,
+	0x00400ebc,
+	0x00400ec0,
+	0x00400ec4,
+	0x00400ec8,
+	0x00400ecc,
+	0x00400ed0,
+	0x00400ed4,
+	0x00400ed8,
+	0x00400edc,
+	0x00400ee0,
+	0x00400a00,
+	0x00400a04,
+};
+
+struct nv10_graph_priv {
+	struct nouveau_graph base;
+	struct nv10_graph_chan *chan[32];
+	spinlock_t lock;
+};
+
+struct nv10_graph_chan {
+	struct nouveau_object base;
+	int chid;
+	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
+	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
+	struct pipe_state pipe_state;
+	u32 lma_window[4];
+};
+
+
+static inline struct nv10_graph_priv *
+nv10_graph_priv(struct nv10_graph_chan *chan)
+{
+	return (void *)nv_object(chan)->engine;
+}
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+#define PIPE_SAVE(priv, state, addr)					\
+	do {								\
+		int __i;						\
+		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
+			state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
+	} while (0)
+
+#define PIPE_RESTORE(priv, state, addr)					\
+	do {								\
+		int __i;						\
+		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
+			nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+	} while (0)
+
+static struct nouveau_oclass
+nv10_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0056, &nv04_graph_ofuncs }, /* celsius */
+	{},
+};
+
+static struct nouveau_oclass
+nv15_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0096, &nv04_graph_ofuncs }, /* celsius */
+	{},
+};
+
+static int
+nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	struct nv10_graph_chan *chan = (void *)object->parent;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
+	u32 xfmode0, xfmode1;
+	u32 data = *(u32 *)args;
+	int i;
+
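+	/* The four LMA_WINDOW methods (0x1638..0x1644) are buffered in
+	 * lma_window and only pushed through the pipe once the last one
+	 * arrives.
+	 */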
+	chan->lma_window[(mthd - 0x1638) / 4] = data;
+
+	if (mthd != 0x1644)
+		return 0;
+
+	nv04_graph_idle(priv);
+
+	PIPE_SAVE(priv, pipe_0x0040, 0x0040);
+	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+
+	PIPE_RESTORE(priv, chan->lma_window, 0x6790);
+
+	nv04_graph_idle(priv);
+
+	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+
+	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
+	PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
+	PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
+
+	nv04_graph_idle(priv);
+
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+
+	nv04_graph_idle(priv);
+
+	PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
+
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+
+	PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
+	PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
+	PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
+	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv04_graph_idle(priv);
+
+	return 0;
+}
+
+static int
+nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	struct nv10_graph_chan *chan = (void *)object->parent;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+
+	nv04_graph_idle(priv);
+
+	nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
+	nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
+	return 0;
+}
+
+static struct nouveau_omthds
+nv17_celcius_omthds[] = {
+	{ 0x1638, nv17_graph_mthd_lma_window },
+	{ 0x163c, nv17_graph_mthd_lma_window },
+	{ 0x1640, nv17_graph_mthd_lma_window },
+	{ 0x1644, nv17_graph_mthd_lma_window },
+	{ 0x1658, nv17_graph_mthd_lma_enable },
+	{}
+};
+
+static struct nouveau_oclass
+nv17_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0099, &nv04_graph_ofuncs, nv17_celcius_omthds },
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nv10_graph_chan *
+nv10_graph_channel(struct nv10_graph_priv *priv)
+{
+	struct nv10_graph_chan *chan = NULL;
+	if (nv_rd32(priv, 0x400144) & 0x00010000) {
+		int chid = nv_rd32(priv, 0x400148) >> 24;
+		if (chid < ARRAY_SIZE(priv->chan))
+			chan = priv->chan[chid];
+	}
+	return chan;
+}
+
+static void
+nv10_graph_save_pipe(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+
+	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
+	PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
+	PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
+	PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
+	PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
+	PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
+	PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
+}
+
+static void
+nv10_graph_load_pipe(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+	u32 xfmode0, xfmode1;
+	int i;
+
+	nv04_graph_idle(priv);
+	/* XXX check haiku comments */
+	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+	nv04_graph_idle(priv);
+
+	/* restore XFMODE */
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
+	PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
+	PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
+	PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
+	PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
+	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
+	PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
+	nv04_graph_idle(priv);
+}
+
+static void
+nv10_graph_create_pipe(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe_state = &chan->pipe_state;
+	u32 *pipe_state_addr;
+	int i;
+#define PIPE_INIT(addr) \
+	do { \
+		pipe_state_addr = pipe_state->pipe_##addr; \
+	} while (0)
+#define PIPE_INIT_END(addr) \
+	do { \
+		u32 *__end_addr = pipe_state->pipe_##addr + \
+				ARRAY_SIZE(pipe_state->pipe_##addr); \
+		if (pipe_state_addr != __end_addr) \
+			nv_error(priv, "incomplete pipe init for 0x%x: %p/%p\n", \
+				addr, pipe_state_addr, __end_addr); \
+	} while (0)
+#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
+
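+	/* Build the initial pipe state image for a new channel; PIPE_INIT_END
+	 * checks that each range received exactly the expected number of
+	 * values.
+	 */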
+	PIPE_INIT(0x0200);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0200);
+
+	PIPE_INIT(0x6400);
+	for (i = 0; i < 211; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f000000);
+	NV_WRITE_PIPE_INIT(0x3f000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	PIPE_INIT_END(0x6400);
+
+	PIPE_INIT(0x6800);
+	for (i = 0; i < 162; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	for (i = 0; i < 25; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x6800);
+
+	PIPE_INIT(0x6c00);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0xbf800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x6c00);
+
+	PIPE_INIT(0x7000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	for (i = 0; i < 35; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7000);
+
+	PIPE_INIT(0x7400);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7400);
+
+	PIPE_INIT(0x7800);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7800);
+
+	PIPE_INIT(0x4400);
+	for (i = 0; i < 32; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x4400);
+
+	PIPE_INIT(0x0000);
+	for (i = 0; i < 16; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0000);
+
+	PIPE_INIT(0x0040);
+	for (i = 0; i < 4; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0040);
+
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
+}
+
+static int
+nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
+		if (nv10_graph_ctx_regs[i] == reg)
+			return i;
+	}
+	nv_error(priv, "unknown offset nv10_ctx_regs %d\n", reg);
+	return -1;
+}
+
+static int
+nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
+		if (nv17_graph_ctx_regs[i] == reg)
+			return i;
+	}
+	nv_error(priv, "unknown offset nv17_ctx_regs %d\n", reg);
+	return -1;
+}
+
+static void
+nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
+	u32 ctx_user, ctx_switch[5];
+	int i, subchan = -1;
+
+	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
+	 * that cannot be restored via MMIO. Do it through the FIFO
+	 * instead.
+	 */
+
+	/* Look for a celsius object */
+	for (i = 0; i < 8; i++) {
+		int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+
+		if (class == 0x56 || class == 0x96 || class == 0x99) {
+			subchan = i;
+			break;
+		}
+	}
+
+	if (subchan < 0 || !inst)
+		return;
+
+	/* Save the current ctx object */
+	ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
+	for (i = 0; i < 5; i++)
+		ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
+
+	/* Save the FIFO state */
+	st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
+	st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
+	st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
+	fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+
+	for (i = 0; i < ARRAY_SIZE(fifo); i++)
+		fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
+
+	/* Switch to the celsius subchannel */
+	for (i = 0; i < 5; i++)
+		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
+			nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+
+	/* Inject NV10TCL_DMA_VTXBUF */
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
+		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+	nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+
+	/* Restore the FIFO state */
+	for (i = 0; i < ARRAY_SIZE(fifo); i++)
+		nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
+
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+
+	/* Restore the current ctx object */
+	for (i = 0; i < 5; i++)
+		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+	nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
+}
+
+static int
+nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	u32 inst;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+		nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
+
+	if (nv_device(priv)->chipset >= 0x17) {
+		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+			nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
+	}
+
+	nv10_graph_load_pipe(chan);
+
+	inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
+	nv10_graph_load_dma_vtxbuf(chan, chid, inst);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
+	return 0;
+}
+
+static int
+nv10_graph_unload_context(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+		chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
+
+	if (nv_device(priv)->chipset >= 0x17) {
+		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+			chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
+	}
+
+	nv10_graph_save_pipe(chan);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	return 0;
+}
+
+static void
+nv10_graph_context_switch(struct nv10_graph_priv *priv)
+{
+	struct nv10_graph_chan *prev = NULL;
+	struct nv10_graph_chan *next = NULL;
+	unsigned long flags;
+	int chid;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv04_graph_idle(priv);
+
+	/* If previous context is valid, we need to save it */
+	prev = nv10_graph_channel(priv);
+	if (prev)
+		nv10_graph_unload_context(prev);
+
+	/* load context for next channel */
+	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+	next = priv->chan[chid];
+	if (next)
+		nv10_graph_load_context(next, chid);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+#define NV_WRITE_CTX(reg, val) do { \
+	int offset = nv10_graph_ctx_regs_find_offset(priv, reg); \
+	if (offset > 0) \
+		chan->nv10[offset] = val; \
+	} while (0)
+
+#define NV17_WRITE_CTX(reg, val) do { \
+	int offset = nv17_graph_ctx_regs_find_offset(priv, reg); \
+	if (offset > 0) \
+		chan->nv17[offset] = val; \
+	} while (0)
+
+static int
+nv10_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_fifo_chan *fifo = (void *)parent;
+	struct nv10_graph_priv *priv = (void *)engine;
+	struct nv10_graph_chan *chan;
+	unsigned long flags;
+	int ret;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->chan[fifo->chid]) {
+		*pobject = nv_object(priv->chan[fifo->chid]);
+		atomic_inc(&(*pobject)->refcount);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		nouveau_object_destroy(&chan->base);
+		return 1;
+	}
+
+	NV_WRITE_CTX(0x00400e88, 0x08000000);
+	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
+	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
+	NV_WRITE_CTX(0x00400e10, 0x00001000);
+	NV_WRITE_CTX(0x00400e14, 0x00001000);
+	NV_WRITE_CTX(0x00400e30, 0x00080008);
+	NV_WRITE_CTX(0x00400e34, 0x00080008);
+	if (nv_device(priv)->chipset >= 0x17) {
+		/* is this really needed? */
+		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
+					nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
+		NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
+		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
+		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
+		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
+		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
+	}
+	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);
+
+	nv10_graph_create_pipe(chan);
+
+	priv->chan[fifo->chid] = chan;
+	chan->chid = fifo->chid;
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+}
+
+static void
+nv10_graph_context_dtor(struct nouveau_object *object)
+{
+	struct nv10_graph_priv *priv = (void *)object->engine;
+	struct nv10_graph_chan *chan = (void *)object;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	nouveau_object_destroy(&chan->base);
+}
+
+static int
+nv10_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv10_graph_priv *priv = (void *)object->engine;
+	struct nv10_graph_chan *chan = (void *)object;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv10_graph_channel(priv) == chan)
+		nv10_graph_unload_context(chan);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return nouveau_object_fini(&chan->base, suspend);
+}
+
+static struct nouveau_oclass
+nv10_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_graph_context_ctor,
+		.dtor = nv10_graph_context_dtor,
+		.init = nouveau_object_init,
+		.fini = nv10_graph_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv10_graph_tile_prog(struct nouveau_engine *engine, int i)
+{
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+	struct nv10_graph_priv *priv = (void *)engine;
+	unsigned long flags;
+
+	pfifo->pause(pfifo, &flags);
+	nv04_graph_idle(priv);
+
+	nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
+
+	pfifo->start(pfifo, &flags);
+}
+
+const struct nouveau_bitfield nv10_graph_intr_name[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
+	{}
+};
+
+const struct nouveau_bitfield nv10_graph_nstatus[] = {
+	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
+	{}
+};
+
+static void
+nv10_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nv10_graph_priv *priv = (void *)subdev;
+	struct nv10_graph_chan *chan = NULL;
+	struct nouveau_namedb *namedb = NULL;
+	struct nouveau_handle *handle = NULL;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 chid = (addr & 0x01f00000) >> 20;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 show = stat;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	chan = priv->chan[chid];
+	if (chan)
+		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (stat & NV_PGRAPH_INTR_ERROR) {
+		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
+			handle = nouveau_namedb_get_class(namedb, class);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_ERROR;
+		}
+	}
+
+	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		nv10_graph_context_switch(priv);
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+	if (show) {
+		nv_error(priv, "");
+		nouveau_bitfield_print(nv10_graph_intr_name, show);
+		printk(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		printk(" nstatus:");
+		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+		printk("\n");
+		nv_error(priv, "ch %d/%d class 0x%04x "
+			       "mthd 0x%04x data 0x%08x\n",
+			 chid, subc, class, mthd, data);
+	}
+
+	nouveau_namedb_put(handle);
+}
+
+static int
+nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv10_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv10_graph_intr;
+	nv_engine(priv)->cclass = &nv10_graph_cclass;
+
+	if (nv_device(priv)->chipset <= 0x10)
+		nv_engine(priv)->sclass = nv10_graph_sclass;
+	else
+	if (nv_device(priv)->chipset <  0x17 ||
+	    nv_device(priv)->chipset == 0x1a)
+		nv_engine(priv)->sclass = nv15_graph_sclass;
+	else
+		nv_engine(priv)->sclass = nv17_graph_sclass;
+
+	nv_engine(priv)->tile_prog = nv10_graph_tile_prog;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static void
+nv10_graph_dtor(struct nouveau_object *object)
+{
+	struct nv10_graph_priv *priv = (void *)object;
+	nouveau_graph_destroy(&priv->base);
+}
+
+static int
+nv10_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	struct nv10_graph_priv *priv = (void *)engine;
+	int ret, i;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	/* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
+
+	if (nv_device(priv)->chipset >= 0x17) {
+		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+		nv_wr32(priv, 0x400a10, 0x03ff3fb6);
+		nv_wr32(priv, 0x400838, 0x002f8684);
+		nv_wr32(priv, 0x40083c, 0x00115f3f);
+		nv_wr32(priv, 0x4006b0, 0x40000020);
+	} else {
+		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	}
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
+
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+	return 0;
+}
+
+static int
+nv10_graph_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv10_graph_priv *priv = (void *)object;
+	return nouveau_graph_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv10_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_graph_ctor,
+		.dtor = nv10_graph_dtor,
+		.init = nv10_graph_init,
+		.fini = nv10_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
new file mode 100644
index 000000000000..8f3f619c4a78
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -0,0 +1,381 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+#include <engine/fifo.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv20_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x0096, &nv04_graph_ofuncs, NULL }, /* celsius */
+	{ 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */
+	{ 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv20_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+					   0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC,
+					   &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x033c, 0xffff0000);
+	nv_wo32(chan, 0x03a0, 0x0fff0000);
+	nv_wo32(chan, 0x03a4, 0x0fff0000);
+	nv_wo32(chan, 0x047c, 0x00000101);
+	nv_wo32(chan, 0x0490, 0x00000111);
+	nv_wo32(chan, 0x04a8, 0x44400000);
+	for (i = 0x04d4; i <= 0x04e0; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x04f4; i <= 0x0500; i += 4)
+		nv_wo32(chan, i, 0x00080000);
+	for (i = 0x050c; i <= 0x0518; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x051c; i <= 0x0528; i += 4)
+		nv_wo32(chan, i, 0x000105b8);
+	for (i = 0x052c; i <= 0x0538; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	for (i = 0x055c; i <= 0x0598; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x05a4, 0x4b7fffff);
+	nv_wo32(chan, 0x05fc, 0x00000001);
+	nv_wo32(chan, 0x0604, 0x00004000);
+	nv_wo32(chan, 0x0610, 0x00000001);
+	nv_wo32(chan, 0x0618, 0x00040000);
+	nv_wo32(chan, 0x061c, 0x00010000);
+	for (i = 0x1c1c; i <= 0x248c; i += 16) {
+		nv_wo32(chan, (i + 0), 0x10700ff9);
+		nv_wo32(chan, (i + 4), 0x0436086c);
+		nv_wo32(chan, (i + 8), 0x000c001b);
+	}
+	nv_wo32(chan, 0x281c, 0x3f800000);
+	nv_wo32(chan, 0x2830, 0x3f800000);
+	nv_wo32(chan, 0x285c, 0x40000000);
+	nv_wo32(chan, 0x2860, 0x3f800000);
+	nv_wo32(chan, 0x2864, 0x3f000000);
+	nv_wo32(chan, 0x286c, 0x40000000);
+	nv_wo32(chan, 0x2870, 0x3f800000);
+	nv_wo32(chan, 0x2878, 0xbf800000);
+	nv_wo32(chan, 0x2880, 0xbf800000);
+	nv_wo32(chan, 0x34a4, 0x000fe000);
+	nv_wo32(chan, 0x3530, 0x000003f8);
+	nv_wo32(chan, 0x3540, 0x002fe000);
+	for (i = 0x355c; i <= 0x3578; i += 4)
+		nv_wo32(chan, i, 0x001c527c);
+	return 0;
+}
+
+int
+nv20_graph_context_init(struct nouveau_object *object)
+{
+	struct nv20_graph_priv *priv = (void *)object->engine;
+	struct nv20_graph_chan *chan = (void *)object;
+	int ret;
+
+	ret = nouveau_graph_context_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+	return 0;
+}
+
+int
+nv20_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv20_graph_priv *priv = (void *)object->engine;
+	struct nv20_graph_chan *chan = (void *)object;
+	int chid = -1;
+
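+	/* If this channel is the one currently resident in PGRAPH, trigger a
+	 * context transfer for it and reset the current-channel state before
+	 * dropping it from the context table.
+	 */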
+	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+	if (nv_rd32(priv, 0x400144) & 0x00010000)
+		chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
+	if (chan->chid == chid) {
+		nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
+		nv_wr32(priv, 0x400788, 0x00000002);
+		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+		nv_wr32(priv, 0x400144, 0x10000000);
+		nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
+	}
+	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
+
+	nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
+	return nouveau_graph_context_fini(&chan->base, suspend);
+}
+
+static struct nouveau_oclass
+nv20_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x20),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv20_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+void
+nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
+{
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+	struct nv20_graph_priv *priv = (void *)engine;
+	unsigned long flags;
+
+	pfifo->pause(pfifo, &flags);
+	nv04_graph_idle(priv);
+
+	nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+	if (nv_device(engine)->card_type == NV_20) {
+		nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+	}
+
+	pfifo->start(pfifo, &flags);
+}
+
+void
+nv20_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nv20_graph_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 chid = (addr & 0x01f00000) >> 20;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 show = stat;
+
+	engctx = nouveau_engctx_get(engine, chid);
+	if (stat & NV_PGRAPH_INTR_ERROR) {
+		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+			handle = nouveau_handle_get_class(engctx, class);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_ERROR;
+			nouveau_handle_put(handle);
+		}
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+	if (show) {
+		nv_info(priv, "");
+		nouveau_bitfield_print(nv10_graph_intr_name, show);
+		printk(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		printk(" nstatus:");
+		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+		printk("\n");
+		nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, subc, class, mthd, data);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv20_graph_cclass;
+	nv_engine(priv)->sclass = nv20_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+void
+nv20_graph_dtor(struct nouveau_object *object)
+{
+	struct nv20_graph_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->ctxtab);
+	nouveau_graph_destroy(&priv->base);
+}
+
+int
+nv20_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nv20_graph_priv *priv = (void *)engine;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	u32 tmp, vramsz;
+	int ret, i;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+
+	if (nv_device(priv)->chipset == 0x20) {
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
+		for (i = 0; i < 15; i++)
+			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+	} else {
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
+		for (i = 0; i < 32; i++)
+			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	nv_wr32(priv, 0x40009C           , 0x00000040);
+
+	if (nv_device(priv)->chipset >= 0x25) {
+		nv_wr32(priv, 0x400890, 0x00a8cfff);
+		nv_wr32(priv, 0x400610, 0x304B1FB6);
+		nv_wr32(priv, 0x400B80, 0x1cbd3883);
+		nv_wr32(priv, 0x400B84, 0x44000000);
+		nv_wr32(priv, 0x400098, 0x40000080);
+		nv_wr32(priv, 0x400B88, 0x000000ff);
+
+	} else {
+		nv_wr32(priv, 0x400880, 0x0008c7df);
+		nv_wr32(priv, 0x400094, 0x00000005);
+		nv_wr32(priv, 0x400B80, 0x45eae20e);
+		nv_wr32(priv, 0x400B84, 0x24000000);
+		nv_wr32(priv, 0x400098, 0x00000040);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+	}
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+
+	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
+	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+
+	/* begin RAM config */
+	vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
+	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
+	nv_wr32(priv, 0x400820, 0);
+	nv_wr32(priv, 0x400824, 0);
+	nv_wr32(priv, 0x400864, vramsz - 1);
+	nv_wr32(priv, 0x400868, vramsz - 1);
+
+	/* Interesting: the writes below overwrite some of the tile setup above. */
+	nv_wr32(priv, 0x400B20, 0x00000000);
+	nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+	return 0;
+}
+
+struct nouveau_oclass
+nv20_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x20),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv20_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv20_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
new file mode 100644
index 000000000000..2bea7313e03f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
@@ -0,0 +1,31 @@
+#ifndef __NV20_GRAPH_H__
+#define __NV20_GRAPH_H__
+
+#include <core/enum.h>
+
+#include <engine/graph.h>
+#include <engine/fifo.h>
+
+struct nv20_graph_priv {
+	struct nouveau_graph base;
+	struct nouveau_gpuobj *ctxtab;
+};
+
+struct nv20_graph_chan {
+	struct nouveau_graph_chan base;
+	int chid;
+};
+
+extern struct nouveau_oclass nv25_graph_sclass[];
+int  nv20_graph_context_init(struct nouveau_object *);
+int  nv20_graph_context_fini(struct nouveau_object *, bool);
+
+void nv20_graph_tile_prog(struct nouveau_engine *, int);
+void nv20_graph_intr(struct nouveau_subdev *);
+
+void nv20_graph_dtor(struct nouveau_object *);
+int  nv20_graph_init(struct nouveau_object *);
+
+int  nv30_graph_init(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
new file mode 100644
index 000000000000..b2b650dd8b28
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -0,0 +1,167 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+struct nouveau_oclass
+nv25_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x0096, &nv04_graph_ofuncs, NULL }, /* celsius */
+	{ 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv25_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x035c, 0xffff0000);
+	nv_wo32(chan, 0x03c0, 0x0fff0000);
+	nv_wo32(chan, 0x03c4, 0x0fff0000);
+	nv_wo32(chan, 0x049c, 0x00000101);
+	nv_wo32(chan, 0x04b0, 0x00000111);
+	nv_wo32(chan, 0x04c8, 0x00000080);
+	nv_wo32(chan, 0x04cc, 0xffff0000);
+	nv_wo32(chan, 0x04d0, 0x00000001);
+	nv_wo32(chan, 0x04e4, 0x44400000);
+	nv_wo32(chan, 0x04fc, 0x4b800000);
+	for (i = 0x0510; i <= 0x051c; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x0530; i <= 0x053c; i += 4)
+		nv_wo32(chan, i, 0x00080000);
+	for (i = 0x0548; i <= 0x0554; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x0558; i <= 0x0564; i += 4)
+		nv_wo32(chan, i, 0x000105b8);
+	for (i = 0x0568; i <= 0x0574; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	for (i = 0x0598; i <= 0x05d4; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x05e0, 0x4b7fffff);
+	nv_wo32(chan, 0x0620, 0x00000080);
+	nv_wo32(chan, 0x0624, 0x30201000);
+	nv_wo32(chan, 0x0628, 0x70605040);
+	nv_wo32(chan, 0x062c, 0xb0a09080);
+	nv_wo32(chan, 0x0630, 0xf0e0d0c0);
+	nv_wo32(chan, 0x0664, 0x00000001);
+	nv_wo32(chan, 0x066c, 0x00004000);
+	nv_wo32(chan, 0x0678, 0x00000001);
+	nv_wo32(chan, 0x0680, 0x00040000);
+	nv_wo32(chan, 0x0684, 0x00010000);
+	for (i = 0x1b04; i <= 0x2374; i += 16) {
+		nv_wo32(chan, (i + 0), 0x10700ff9);
+		nv_wo32(chan, (i + 4), 0x0436086c);
+		nv_wo32(chan, (i + 8), 0x000c001b);
+	}
+	nv_wo32(chan, 0x2704, 0x3f800000);
+	nv_wo32(chan, 0x2718, 0x3f800000);
+	nv_wo32(chan, 0x2744, 0x40000000);
+	nv_wo32(chan, 0x2748, 0x3f800000);
+	nv_wo32(chan, 0x274c, 0x3f000000);
+	nv_wo32(chan, 0x2754, 0x40000000);
+	nv_wo32(chan, 0x2758, 0x3f800000);
+	nv_wo32(chan, 0x2760, 0xbf800000);
+	nv_wo32(chan, 0x2768, 0xbf800000);
+	nv_wo32(chan, 0x308c, 0x000fe000);
+	nv_wo32(chan, 0x3108, 0x000003f8);
+	nv_wo32(chan, 0x3468, 0x002fe000);
+	for (i = 0x3484; i <= 0x34a0; i += 4)
+		nv_wo32(chan, i, 0x001c527c);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv25_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x25),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv25_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv25_graph_cclass;
+	nv_engine(priv)->sclass = nv25_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv25_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x25),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv25_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv20_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
new file mode 100644
index 000000000000..700462fa0ae0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -0,0 +1,134 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv2a_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x033c, 0xffff0000);
+	nv_wo32(chan, 0x03a0, 0x0fff0000);
+	nv_wo32(chan, 0x03a4, 0x0fff0000);
+	nv_wo32(chan, 0x047c, 0x00000101);
+	nv_wo32(chan, 0x0490, 0x00000111);
+	nv_wo32(chan, 0x04a8, 0x44400000);
+	for (i = 0x04d4; i <= 0x04e0; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x04f4; i <= 0x0500; i += 4)
+		nv_wo32(chan, i, 0x00080000);
+	for (i = 0x050c; i <= 0x0518; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x051c; i <= 0x0528; i += 4)
+		nv_wo32(chan, i, 0x000105b8);
+	for (i = 0x052c; i <= 0x0538; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	for (i = 0x055c; i <= 0x0598; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x05a4, 0x4b7fffff);
+	nv_wo32(chan, 0x05fc, 0x00000001);
+	nv_wo32(chan, 0x0604, 0x00004000);
+	nv_wo32(chan, 0x0610, 0x00000001);
+	nv_wo32(chan, 0x0618, 0x00040000);
+	nv_wo32(chan, 0x061c, 0x00010000);
+	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
+		nv_wo32(chan, (i + 0), 0x10700ff9);
+		nv_wo32(chan, (i + 4), 0x0436086c);
+		nv_wo32(chan, (i + 8), 0x000c001b);
+	}
+	nv_wo32(chan, 0x269c, 0x3f800000);
+	nv_wo32(chan, 0x26b0, 0x3f800000);
+	nv_wo32(chan, 0x26dc, 0x40000000);
+	nv_wo32(chan, 0x26e0, 0x3f800000);
+	nv_wo32(chan, 0x26e4, 0x3f000000);
+	nv_wo32(chan, 0x26ec, 0x40000000);
+	nv_wo32(chan, 0x26f0, 0x3f800000);
+	nv_wo32(chan, 0x26f8, 0xbf800000);
+	nv_wo32(chan, 0x2700, 0xbf800000);
+	nv_wo32(chan, 0x3024, 0x000fe000);
+	nv_wo32(chan, 0x30a0, 0x000003f8);
+	nv_wo32(chan, 0x33fc, 0x002fe000);
+	for (i = 0x341c; i <= 0x3438; i += 4)
+		nv_wo32(chan, i, 0x001c527c);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv2a_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x2a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv2a_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv2a_graph_cclass;
+	nv_engine(priv)->sclass = nv25_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv2a_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x2a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv2a_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv20_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
new file mode 100644
index 000000000000..cedadaa92d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -0,0 +1,238 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv30_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+	{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+	{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+	{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+	{ 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv30_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x0410, 0x00000101);
+	nv_wo32(chan, 0x0424, 0x00000111);
+	nv_wo32(chan, 0x0428, 0x00000060);
+	nv_wo32(chan, 0x0444, 0x00000080);
+	nv_wo32(chan, 0x0448, 0xffff0000);
+	nv_wo32(chan, 0x044c, 0x00000001);
+	nv_wo32(chan, 0x0460, 0x44400000);
+	nv_wo32(chan, 0x048c, 0xffff0000);
+	for (i = 0x04e0; i < 0x04e8; i += 4)
+		nv_wo32(chan, i, 0x0fff0000);
+	nv_wo32(chan, 0x04ec, 0x00011100);
+	for (i = 0x0508; i < 0x0548; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x0550, 0x4b7fffff);
+	nv_wo32(chan, 0x058c, 0x00000080);
+	nv_wo32(chan, 0x0590, 0x30201000);
+	nv_wo32(chan, 0x0594, 0x70605040);
+	nv_wo32(chan, 0x0598, 0xb8a89888);
+	nv_wo32(chan, 0x059c, 0xf8e8d8c8);
+	nv_wo32(chan, 0x05b0, 0xb0000000);
+	for (i = 0x0600; i < 0x0640; i += 4)
+		nv_wo32(chan, i, 0x00010588);
+	for (i = 0x0640; i < 0x0680; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x06c0; i < 0x0700; i += 4)
+		nv_wo32(chan, i, 0x0008aae4);
+	for (i = 0x0700; i < 0x0740; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x0740; i < 0x0780; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	nv_wo32(chan, 0x085c, 0x00040000);
+	nv_wo32(chan, 0x0860, 0x00010000);
+	for (i = 0x0864; i < 0x0874; i += 4)
+		nv_wo32(chan, i, 0x00040004);
+	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
+		nv_wo32(chan, i + 0, 0x10700ff9);
+		nv_wo32(chan, i + 1, 0x0436086c);
+		nv_wo32(chan, i + 2, 0x000c001b);
+	}
+	for (i = 0x30b8; i < 0x30c8; i += 4)
+		nv_wo32(chan, i, 0x0000ffff);
+	nv_wo32(chan, 0x344c, 0x3f800000);
+	nv_wo32(chan, 0x3808, 0x3f800000);
+	nv_wo32(chan, 0x381c, 0x3f800000);
+	nv_wo32(chan, 0x3848, 0x40000000);
+	nv_wo32(chan, 0x384c, 0x3f800000);
+	nv_wo32(chan, 0x3850, 0x3f000000);
+	nv_wo32(chan, 0x3858, 0x40000000);
+	nv_wo32(chan, 0x385c, 0x3f800000);
+	nv_wo32(chan, 0x3864, 0xbf800000);
+	nv_wo32(chan, 0x386c, 0xbf800000);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv30_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x30),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv30_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv30_graph_cclass;
+	nv_engine(priv)->sclass = nv30_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+int
+nv30_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nv20_graph_priv *priv = (void *)engine;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	int ret, i;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+
+	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nv_wr32(priv, 0x400890, 0x01b463ff);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+	nv_wr32(priv, 0x400B80, 0x1003d888);
+	nv_wr32(priv, 0x400B84, 0x0c000000);
+	nv_wr32(priv, 0x400098, 0x00000000);
+	nv_wr32(priv, 0x40009C, 0x0005ad00);
+	nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+	nv_wr32(priv, 0x4000a0, 0x00000000);
+	nv_wr32(priv, 0x4000a4, 0x00000008);
+	nv_wr32(priv, 0x4008a8, 0xb784a400);
+	nv_wr32(priv, 0x400ba0, 0x002f8685);
+	nv_wr32(priv, 0x400ba4, 0x00231f3f);
+	nv_wr32(priv, 0x4008a4, 0x40000020);
+
+	if (nv_device(priv)->chipset == 0x34) {
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
+	}
+
+	nv_wr32(priv, 0x4000c0, 0x00000016);
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nv_wr32(priv, 0x0040075c             , 0x00000001);
+
+	/* begin RAM config */
+	/* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
+	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+	if (nv_device(priv)->chipset != 0x34) {
+		nv_wr32(priv, 0x400750, 0x00EA0000);
+		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
+		nv_wr32(priv, 0x400750, 0x00EA0004);
+		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
+	}
+	return 0;
+}
+
+struct nouveau_oclass
+nv30_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x30),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv30_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv30_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
new file mode 100644
index 000000000000..273f6320027b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -0,0 +1,168 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv34_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+	{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+	{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+	{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+	{ 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv34_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x040c, 0x01000101);
+	nv_wo32(chan, 0x0420, 0x00000111);
+	nv_wo32(chan, 0x0424, 0x00000060);
+	nv_wo32(chan, 0x0440, 0x00000080);
+	nv_wo32(chan, 0x0444, 0xffff0000);
+	nv_wo32(chan, 0x0448, 0x00000001);
+	nv_wo32(chan, 0x045c, 0x44400000);
+	nv_wo32(chan, 0x0480, 0xffff0000);
+	for (i = 0x04d4; i < 0x04dc; i += 4)
+		nv_wo32(chan, i, 0x0fff0000);
+	nv_wo32(chan, 0x04e0, 0x00011100);
+	for (i = 0x04fc; i < 0x053c; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x0544, 0x4b7fffff);
+	nv_wo32(chan, 0x057c, 0x00000080);
+	nv_wo32(chan, 0x0580, 0x30201000);
+	nv_wo32(chan, 0x0584, 0x70605040);
+	nv_wo32(chan, 0x0588, 0xb8a89888);
+	nv_wo32(chan, 0x058c, 0xf8e8d8c8);
+	nv_wo32(chan, 0x05a0, 0xb0000000);
+	for (i = 0x05f0; i < 0x0630; i += 4)
+		nv_wo32(chan, i, 0x00010588);
+	for (i = 0x0630; i < 0x0670; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x06b0; i < 0x06f0; i += 4)
+		nv_wo32(chan, i, 0x0008aae4);
+	for (i = 0x06f0; i < 0x0730; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x0730; i < 0x0770; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	nv_wo32(chan, 0x0850, 0x00040000);
+	nv_wo32(chan, 0x0854, 0x00010000);
+	for (i = 0x0858; i < 0x0868; i += 4)
+		nv_wo32(chan, i, 0x00040004);
+	for (i = 0x15ac; i <= 0x271c ; i += 16) {
+		nv_wo32(chan, i + 0, 0x10700ff9);
+		nv_wo32(chan, i + 1, 0x0436086c);
+		nv_wo32(chan, i + 2, 0x000c001b);
+	}
+	for (i = 0x274c; i < 0x275c; i += 4)
+		nv_wo32(chan, i, 0x0000ffff);
+	nv_wo32(chan, 0x2ae0, 0x3f800000);
+	nv_wo32(chan, 0x2e9c, 0x3f800000);
+	nv_wo32(chan, 0x2eb0, 0x3f800000);
+	nv_wo32(chan, 0x2edc, 0x40000000);
+	nv_wo32(chan, 0x2ee0, 0x3f800000);
+	nv_wo32(chan, 0x2ee4, 0x3f000000);
+	nv_wo32(chan, 0x2eec, 0x40000000);
+	nv_wo32(chan, 0x2ef0, 0x3f800000);
+	nv_wo32(chan, 0x2ef8, 0xbf800000);
+	nv_wo32(chan, 0x2f00, 0xbf800000);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv34_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x34),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv34_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv34_graph_cclass;
+	nv_engine(priv)->sclass = nv34_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv34_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x34),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv34_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv30_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
new file mode 100644
index 000000000000..f40ee2116ee1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -0,0 +1,166 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv35_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+	{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+	{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+	{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+	{ 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv35_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x040c, 0x00000101);
+	nv_wo32(chan, 0x0420, 0x00000111);
+	nv_wo32(chan, 0x0424, 0x00000060);
+	nv_wo32(chan, 0x0440, 0x00000080);
+	nv_wo32(chan, 0x0444, 0xffff0000);
+	nv_wo32(chan, 0x0448, 0x00000001);
+	nv_wo32(chan, 0x045c, 0x44400000);
+	nv_wo32(chan, 0x0488, 0xffff0000);
+	for (i = 0x04dc; i < 0x04e4; i += 4)
+		nv_wo32(chan, i, 0x0fff0000);
+	nv_wo32(chan, 0x04e8, 0x00011100);
+	for (i = 0x0504; i < 0x0544; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x054c, 0x4b7fffff);
+	nv_wo32(chan, 0x0588, 0x00000080);
+	nv_wo32(chan, 0x058c, 0x30201000);
+	nv_wo32(chan, 0x0590, 0x70605040);
+	nv_wo32(chan, 0x0594, 0xb8a89888);
+	nv_wo32(chan, 0x0598, 0xf8e8d8c8);
+	nv_wo32(chan, 0x05ac, 0xb0000000);
+	for (i = 0x0604; i < 0x0644; i += 4)
+		nv_wo32(chan, i, 0x00010588);
+	for (i = 0x0644; i < 0x0684; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x06c4; i < 0x0704; i += 4)
+		nv_wo32(chan, i, 0x0008aae4);
+	for (i = 0x0704; i < 0x0744; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x0744; i < 0x0784; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	nv_wo32(chan, 0x0860, 0x00040000);
+	nv_wo32(chan, 0x0864, 0x00010000);
+	for (i = 0x0868; i < 0x0878; i += 4)
+		nv_wo32(chan, i, 0x00040004);
+	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
+		nv_wo32(chan, i + 0, 0x10700ff9);
+		nv_wo32(chan, i + 4, 0x0436086c);
+		nv_wo32(chan, i + 8, 0x000c001b);
+	}
+	for (i = 0x30bc; i < 0x30cc; i += 4)
+		nv_wo32(chan, i, 0x0000ffff);
+	nv_wo32(chan, 0x3450, 0x3f800000);
+	nv_wo32(chan, 0x380c, 0x3f800000);
+	nv_wo32(chan, 0x3820, 0x3f800000);
+	nv_wo32(chan, 0x384c, 0x40000000);
+	nv_wo32(chan, 0x3850, 0x3f800000);
+	nv_wo32(chan, 0x3854, 0x3f000000);
+	nv_wo32(chan, 0x385c, 0x40000000);
+	nv_wo32(chan, 0x3860, 0x3f800000);
+	nv_wo32(chan, 0x3868, 0xbf800000);
+	nv_wo32(chan, 0x3870, 0xbf800000);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv35_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x35),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv35_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv35_graph_cclass;
+	nv_engine(priv)->sclass = nv35_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv35_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x35),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv35_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv30_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
new file mode 100644
index 000000000000..8d0021049ec0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include <engine/graph.h>
+#include <engine/fifo.h>
+
+#include "nv40.h"
+#include "regs.h"
+
+struct nv40_graph_priv {
+	struct nouveau_graph base;
+	u32 size;
+};
+
+struct nv40_graph_chan {
+	struct nouveau_graph_chan base;
+};
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static int
+nv40_graph_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    20, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+#ifdef __BIG_ENDIAN
+	nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+#endif
+	nv_wo32(obj, 0x0c, 0x00000000);
+	nv_wo32(obj, 0x10, 0x00000000);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv40_graph_ofuncs = {
+	.ctor = nv40_graph_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv40_graph_sclass[] = {
+	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
+	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
+	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
+	{ 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
+	{},
+};
+
+static struct nouveau_oclass
+nv44_graph_sclass[] = {
+	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
+	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
+	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
+	{ 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv40_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv40_graph_priv *priv = (void *)engine;
+	struct nv40_graph_chan *chan;
+	int ret;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+					   priv->size, 16,
+					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
+	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
+	return 0;
+}
+
+static int
+nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	struct nv04_graph_chan *chan = (void *)object;
+	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
+	int ret = 0;
+
+	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+
+	if (nv_rd32(priv, 0x40032c) == inst) {
+		if (suspend) {
+			nv_wr32(priv, 0x400720, 0x00000000);
+			nv_wr32(priv, 0x400784, inst);
+			nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
+			nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
+			if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
+				u32 insn = nv_rd32(priv, 0x400308);
+				nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
+				ret = -EBUSY;
+			}
+		}
+
+		nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
+	}
+
+	if (nv_rd32(priv, 0x400330) == inst)
+		nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
+
+	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
+	return ret;
+}
+
+static struct nouveau_oclass
+nv40_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = nv40_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
+{
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+	struct nv40_graph_priv *priv = (void *)engine;
+	unsigned long flags;
+
+	pfifo->pause(pfifo, &flags);
+	nv04_graph_idle(priv);
+
+	switch (nv_device(priv)->chipset) {
+	case 0x40:
+	case 0x41: /* guess */
+	case 0x42:
+	case 0x43:
+	case 0x45: /* guess */
+	case 0x4e:
+		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
+	case 0x44:
+	case 0x4a:
+		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+		break;
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+	case 0x4c:
+	case 0x67:
+	default:
+		nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
+	}
+
+	pfifo->start(pfifo, &flags);
+}
+
+static void
+nv40_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle = NULL;
+	struct nv40_graph_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
+	u32 show = stat;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & NV_PGRAPH_INTR_ERROR) {
+		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+			handle = nouveau_handle_get_class(engctx, class);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_ERROR;
+			nouveau_handle_put(handle);
+		}
+
+		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+			nv_mask(priv, 0x402000, 0, 0);
+		}
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+	if (show) {
+		nv_info(priv, "");
+		nouveau_bitfield_print(nv10_graph_intr_name, show);
+		printk(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		printk(" nstatus:");
+		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+		printk("\n");
+		nv_error(priv, "ch %d [0x%08x] subc %d class 0x%04x "
+			       "mthd 0x%04x data 0x%08x\n",
+			 chid, inst << 4, subc, class, mthd, data);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv40_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv40_graph_intr;
+	nv_engine(priv)->cclass = &nv40_graph_cclass;
+	if (nv44_graph_class(priv))
+		nv_engine(priv)->sclass = nv44_graph_sclass;
+	else
+		nv_engine(priv)->sclass = nv40_graph_sclass;
+	nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
+	return 0;
+}
+
+static int
+nv40_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	struct nv40_graph_priv *priv = (void *)engine;
+	int ret, i, j;
+	u32 vramsz;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* generate and upload context program */
+	nv40_grctx_init(nv_device(priv), &priv->size);
+
+	/* No context present currently */
+	nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+
+	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+
+	j = nv_rd32(priv, 0x1540) & 0xff;
+	if (j) {
+		for (i = 0; !(j & 1); j >>= 1, i++)
+			;
+		nv_wr32(priv, 0x405000, i);
+	}
+
+	if (nv_device(priv)->chipset == 0x40) {
+		nv_wr32(priv, 0x4009b0, 0x83280fff);
+		nv_wr32(priv, 0x4009b4, 0x000000a0);
+	} else {
+		nv_wr32(priv, 0x400820, 0x83280eff);
+		nv_wr32(priv, 0x400824, 0x000000a0);
+	}
+
+	switch (nv_device(priv)->chipset) {
+	case 0x40:
+	case 0x45:
+		nv_wr32(priv, 0x4009b8, 0x0078e366);
+		nv_wr32(priv, 0x4009bc, 0x0000014c);
+		break;
+	case 0x41:
+	case 0x42: /* pciid also 0x00Cx */
+	/* case 0x0120: XXX (pciid) */
+		nv_wr32(priv, 0x400828, 0x007596ff);
+		nv_wr32(priv, 0x40082c, 0x00000108);
+		break;
+	case 0x43:
+		nv_wr32(priv, 0x400828, 0x0072cb77);
+		nv_wr32(priv, 0x40082c, 0x00000108);
+		break;
+	case 0x44:
+	case 0x46: /* G72 */
+	case 0x4a:
+	case 0x4c: /* G7x-based C51 */
+	case 0x4e:
+		nv_wr32(priv, 0x400860, 0);
+		nv_wr32(priv, 0x400864, 0);
+		break;
+	case 0x47: /* G70 */
+	case 0x49: /* G71 */
+	case 0x4b: /* G73 */
+		nv_wr32(priv, 0x400828, 0x07830610);
+		nv_wr32(priv, 0x40082c, 0x0000016A);
+		break;
+	default:
+		break;
+	}
+
+	nv_wr32(priv, 0x400b38, 0x2ffff800);
+	nv_wr32(priv, 0x400b3c, 0x00006000);
+
+	/* Tiling related stuff. */
+	switch (nv_device(priv)->chipset) {
+	case 0x44:
+	case 0x4a:
+		nv_wr32(priv, 0x400bc4, 0x1003d888);
+		nv_wr32(priv, 0x400bbc, 0xb7a7b500);
+		break;
+	case 0x46:
+		nv_wr32(priv, 0x400bc4, 0x0000e024);
+		nv_wr32(priv, 0x400bbc, 0xb7a7b520);
+		break;
+	case 0x4c:
+	case 0x4e:
+	case 0x67:
+		nv_wr32(priv, 0x400bc4, 0x1003d888);
+		nv_wr32(priv, 0x400bbc, 0xb7a7b540);
+		break;
+	default:
+		break;
+	}
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	/* begin RAM config */
+	vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
+	switch (nv_device(priv)->chipset) {
+	case 0x40:
+		nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+		nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+		nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
+		nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
+		nv_wr32(priv, 0x400820, 0);
+		nv_wr32(priv, 0x400824, 0);
+		nv_wr32(priv, 0x400864, vramsz);
+		nv_wr32(priv, 0x400868, vramsz);
+		break;
+	default:
+		switch (nv_device(priv)->chipset) {
+		case 0x41:
+		case 0x42:
+		case 0x43:
+		case 0x45:
+		case 0x4e:
+		case 0x44:
+		case 0x4a:
+			nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
+			nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
+			break;
+		default:
+			nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
+			nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
+			break;
+		}
+		nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
+		nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
+		nv_wr32(priv, 0x400840, 0);
+		nv_wr32(priv, 0x400844, 0);
+		nv_wr32(priv, 0x4008A0, vramsz);
+		nv_wr32(priv, 0x4008A4, vramsz);
+		break;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_graph_ctor,
+		.dtor = _nouveau_graph_dtor,
+		.init = nv40_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
new file mode 100644
index 000000000000..d2ac975afc2e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -0,0 +1,21 @@
+#ifndef __NV40_GRAPH_H__
+#define __NV40_GRAPH_H__
+
+/* returns 1 if the device is one of the nv4x chipsets using the 0x4497 object
+ * class, which is helpful for determining a number of other hardware features
+ */
+static inline int
+nv44_graph_class(void *priv)
+{
+	struct nouveau_device *device = nv_device(priv);
+
+	if ((device->chipset & 0xf0) == 0x60)
+		return 1;
+
+	return !(0x0baf & (1 << (device->chipset & 0x0f)));
+}
+
+void nv40_grctx_init(struct nouveau_device *, u32 *size);
+void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
new file mode 100644
index 000000000000..ab3b9dcaf478
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -0,0 +1,888 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#include "nv50.h"
+
+struct nv50_graph_priv {
+	struct nouveau_graph base;
+	spinlock_t lock;
+	u32 size;
+};
+
+struct nv50_graph_chan {
+	struct nouveau_graph_chan base;
+};
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static int
+nv50_graph_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv50_graph_ofuncs = {
+	.ctor = nv50_graph_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv50_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x5097, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nv84_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{ 0x8297, &nv50_graph_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nva0_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{ 0x8397, &nv50_graph_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nva3_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{ 0x8597, &nv50_graph_ofuncs },
+	{ 0x85c0, &nv50_graph_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nvaf_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{ 0x85c0, &nv50_graph_ofuncs },
+	{ 0x8697, &nv50_graph_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv50_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv50_graph_priv *priv = (void *)engine;
+	struct nv50_graph_chan *chan;
+	int ret;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+					   priv->size, 0,
+					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
+	return 0;
+}
+
+static struct nouveau_oclass
+nv50_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = _nouveau_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv50_graph_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x00);
+	return 0;
+}
+
+static int
+nv84_graph_tlb_flush(struct nouveau_engine *engine)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(engine);
+	struct nv50_graph_priv *priv = (void *)engine;
+	bool idle, timeout = false;
+	unsigned long flags;
+	u64 start;
+	u32 tmp;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
+
+	start = ptimer->read(ptimer);
+	do {
+		idle = true;
+
+		for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+	} while (!idle &&
+		 !(timeout = ptimer->read(ptimer) - start > 2000000000));
+
+	if (timeout) {
+		nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
+			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
+			 nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
+	}
+
+	nv50_vm_flush_engine(&engine->base, 0x00);
+
+	nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return timeout ? -EBUSY : 0;
+}
+
+static const struct nouveau_enum nv50_mp_exec_error_names[] = {
+	{ 3, "STACK_UNDERFLOW", NULL },
+	{ 4, "QUADON_ACTIVE", NULL },
+	{ 8, "TIMEOUT", NULL },
+	{ 0x10, "INVALID_OPCODE", NULL },
+	{ 0x40, "BREAKPOINT", NULL },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "IN" },
+	{ 0x00000004, "OUT" },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+const struct nouveau_enum nv50_data_error_names[] = {
+	{ 0x00000003, "INVALID_OPERATION", NULL },
+	{ 0x00000004, "INVALID_VALUE", NULL },
+	{ 0x00000005, "INVALID_ENUM", NULL },
+	{ 0x00000008, "INVALID_OBJECT", NULL },
+	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
+	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
+	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
+	{ 0x0000000c, "INVALID_BITFIELD", NULL },
+	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
+	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
+	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
+	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
+	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
+	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
+	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
+	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
+	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
+	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
+	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
+	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
+	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
+	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
+	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
+	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
+	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
+	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
+	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
+	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
+	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
+	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
+	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
+	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
+	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
+	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
+	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
+	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
+	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
+	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
+	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
+	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
+	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_intr_name[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "COMPUTE_QUERY" },
+	{ 0x00000010, "ILLEGAL_MTHD" },
+	{ 0x00000020, "ILLEGAL_CLASS" },
+	{ 0x00000040, "DOUBLE_NOTIFY" },
+	{ 0x00001000, "CONTEXT_SWITCH" },
+	{ 0x00010000, "BUFFER_NOTIFY" },
+	{ 0x00100000, "DATA_ERROR" },
+	{ 0x00200000, "TRAP" },
+	{ 0x01000000, "SINGLE_STEP" },
+	{}
+};
+
+static void
+nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
+{
+	u32 units = nv_rd32(priv, 0x1540);
+	u32 addr, mp10, status, pc, oplow, ophigh;
+	int i;
+	int mps = 0;
+	for (i = 0; i < 4; i++) {
+		if (!(units & 1 << (i+24)))
+			continue;
+		if (nv_device(priv)->chipset < 0xa0)
+			addr = 0x408200 + (tpid << 12) + (i << 7);
+		else
+			addr = 0x408100 + (tpid << 11) + (i << 7);
+		mp10 = nv_rd32(priv, addr + 0x10);
+		status = nv_rd32(priv, addr + 0x14);
+		if (!status)
+			continue;
+		if (display) {
+			nv_rd32(priv, addr + 0x20);
+			pc = nv_rd32(priv, addr + 0x24);
+			oplow = nv_rd32(priv, addr + 0x70);
+			ophigh = nv_rd32(priv, addr + 0x74);
+			nv_error(priv, "TRAP_MP_EXEC - "
+					"TP %d MP %d: ", tpid, i);
+			nouveau_enum_print(nv50_mp_exec_error_names, status);
+			printk(" at %06x warp %d, opcode %08x %08x\n",
+					pc&0xffffff, pc >> 24,
+					oplow, ophigh);
+		}
+		nv_wr32(priv, addr + 0x10, mp10);
+		nv_wr32(priv, addr + 0x14, 0);
+		mps++;
+	}
+	if (!mps && display)
+		nv_error(priv, "TRAP_MP_EXEC - TP %d: "
+				"No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
+		u32 ustatus_new, int display, const char *name)
+{
+	int tps = 0;
+	u32 units = nv_rd32(priv, 0x1540);
+	int i, r;
+	u32 ustatus_addr, ustatus;
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+		if (nv_device(priv)->chipset < 0xa0)
+			ustatus_addr = ustatus_old + (i << 12);
+		else
+			ustatus_addr = ustatus_new + (i << 11);
+		ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
+		if (!ustatus)
+			continue;
+		tps++;
+		switch (type) {
+		case 6: /* texture error... unknown for now */
+			if (display) {
+				nv_error(priv, "magic set %d:\n", i);
+				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+					nv_error(priv, "\t0x%08x: 0x%08x\n", r,
+						nv_rd32(priv, r));
+			}
+			break;
+		case 7: /* MP error */
+			if (ustatus & 0x04030000) {
+				nv50_priv_mp_trap(priv, i, display);
+				ustatus &= ~0x04030000;
+			}
+			break;
+		case 8: /* TPDMA error */
+			{
+			u32 e0c = nv_rd32(priv, ustatus_addr + 4);
+			u32 e10 = nv_rd32(priv, ustatus_addr + 8);
+			u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
+			u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
+			u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
+			u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
+			u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+			/* 2d engine destination */
+			if (ustatus & 0x00000010) {
+				if (display) {
+					nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000010;
+			}
+			/* Render target */
+			if (ustatus & 0x00000040) {
+				if (display) {
+					nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000040;
+			}
+			/* CUDA memory: l[], g[] or stack. */
+			if (ustatus & 0x00000080) {
+				if (display) {
+					if (e18 & 0x80000000) {
+						/* g[] read fault? */
+						nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 24) & 0x1f));
+						e18 &= ~0x1f000000;
+					} else if (e18 & 0xc) {
+						/* g[] write fault? */
+						nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 7) & 0x1f));
+						e18 &= ~0x00000f80;
+					} else {
+						nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+								i, e14, e10);
+					}
+					nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000080;
+			}
+			}
+			break;
+		}
+		if (ustatus) {
+			if (display)
+				nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+		}
+		nv_wr32(priv, ustatus_addr, 0xc0000000);
+	}
+
+	if (!tps && display)
+		nv_info(priv, "%s - No TPs claiming errors?\n", name);
+}
+
+static int
+nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
+			int chid, u64 inst)
+{
+	u32 status = nv_rd32(priv, 0x400108);
+	u32 ustatus;
+
+	if (!status && display) {
+		nv_error(priv, "TRAP: no units reporting traps?\n");
+		return 1;
+	}
+
+	/* DISPATCH: Relays commands to other units and handles NOTIFY,
+	 * COND, QUERY. If you get a trap from it, the command is still stuck
+	 * in DISPATCH and you need to do something about it. */
+	if (status & 0x001) {
+		ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
+		if (!ustatus && display) {
+			nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
+		}
+
+		nv_wr32(priv, 0x400500, 0x00000000);
+
+		/* Known to be triggered by screwed up NOTIFY and COND... */
+		if (ustatus & 0x00000001) {
+			u32 addr = nv_rd32(priv, 0x400808);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 datal = nv_rd32(priv, 0x40080c);
+			u32 datah = nv_rd32(priv, 0x400810);
+			u32 class = nv_rd32(priv, 0x400814);
+			u32 r848 = nv_rd32(priv, 0x400848);
+
+			nv_error(priv, "TRAP DISPATCH_FAULT\n");
+			if (display && (addr & 0x80000000)) {
+				nv_error(priv, "ch %d [0x%010llx] "
+					     "subc %d class 0x%04x mthd 0x%04x "
+					     "data 0x%08x%08x "
+					     "400808 0x%08x 400848 0x%08x\n",
+					chid, inst, subc, class, mthd, datah,
+					datal, addr, r848);
+			} else
+			if (display) {
+				nv_error(priv, "no stuck command?\n");
+			}
+
+			nv_wr32(priv, 0x400808, 0);
+			nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
+			nv_wr32(priv, 0x400848, 0);
+			ustatus &= ~0x00000001;
+		}
+
+		if (ustatus & 0x00000002) {
+			u32 addr = nv_rd32(priv, 0x40084c);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 data = nv_rd32(priv, 0x40085c);
+			u32 class = nv_rd32(priv, 0x400814);
+
+			nv_error(priv, "TRAP DISPATCH_QUERY\n");
+			if (display && (addr & 0x80000000)) {
+				nv_error(priv, "ch %d [0x%010llx] "
+					     "subc %d class 0x%04x mthd 0x%04x "
+					     "data 0x%08x 40084c 0x%08x\n",
+					chid, inst, subc, class, mthd,
+					data, addr);
+			} else
+			if (display) {
+				nv_error(priv, "no stuck command?\n");
+			}
+
+			nv_wr32(priv, 0x40084c, 0);
+			ustatus &= ~0x00000002;
+		}
+
+		if (ustatus && display) {
+			nv_error(priv, "TRAP_DISPATCH (unknown "
+				      "0x%08x)\n", ustatus);
+		}
+
+		nv_wr32(priv, 0x400804, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x001);
+		status &= ~0x001;
+		if (!status)
+			return 0;
+	}
+
+	/* M2MF: Memory to memory copy engine. */
+	if (status & 0x002) {
+		u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
+		if (display) {
+			nv_error(priv, "TRAP_M2MF");
+			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+			printk("\n");
+			nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
+				nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
+				nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(priv, 0x400040, 2);
+		nv_wr32(priv, 0x400040, 0);
+		nv_wr32(priv, 0x406800, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x002);
+		status &= ~0x002;
+	}
+
+	/* VFETCH: Fetches data from vertex buffers. */
+	if (status & 0x004) {
+		u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
+		if (display) {
+			nv_error(priv, "TRAP_VFETCH");
+			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+			printk("\n");
+			nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
+				nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
+				nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
+		}
+
+		nv_wr32(priv, 0x400c04, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x004);
+		status &= ~0x004;
+	}
+
+	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+	if (status & 0x008) {
+		ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
+		if (display) {
+			nv_error(priv, "TRAP_STRMOUT");
+			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+			printk("\n");
+			nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
+				nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
+				nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(priv, 0x400040, 0x80);
+		nv_wr32(priv, 0x400040, 0);
+		nv_wr32(priv, 0x401800, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x008);
+		status &= ~0x008;
+	}
+
+	/* CCACHE: Handles code and c[] caches and fills them. */
+	if (status & 0x010) {
+		ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
+		if (display) {
+			nv_error(priv, "TRAP_CCACHE");
+			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+			printk("\n");
+			nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
+				     " %08x %08x %08x\n",
+				nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
+				nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
+				nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
+				nv_rd32(priv, 0x40501c));
+		}
+
+		nv_wr32(priv, 0x405018, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x010);
+		status &= ~0x010;
+	}
+
+	/* Unknown, not seen yet... 0x402000 is the only trap status reg
+	 * remaining, so try to handle it anyway. Perhaps related to that
+	 * unknown DMA slot on tesla? */
+	if (status & 0x20) {
+		ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
+		if (display)
+			nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
+		nv_wr32(priv, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+	}
+
+	/* TEXTURE: CUDA texturing units */
+	if (status & 0x040) {
+		nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
+				    "TRAP_TEXTURE");
+		nv_wr32(priv, 0x400108, 0x040);
+		status &= ~0x040;
+	}
+
+	/* MP: CUDA execution engines. */
+	if (status & 0x080) {
+		nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
+				    "TRAP_MP");
+		nv_wr32(priv, 0x400108, 0x080);
+		status &= ~0x080;
+	}
+
+	/* TPDMA:  Handles TP-initiated uncached memory accesses:
+	 * l[], g[], stack, 2d surfaces, render targets. */
+	if (status & 0x100) {
+		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
+				    "TRAP_TPDMA");
+		nv_wr32(priv, 0x400108, 0x100);
+		status &= ~0x100;
+	}
+
+	if (status) {
+		if (display)
+			nv_error(priv, "TRAP: unknown 0x%08x\n", status);
+		nv_wr32(priv, 0x400108, status);
+	}
+
+	return 1;
+}
+
+static void
+nv50_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle = NULL;
+	struct nv50_graph_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, 0x400100);
+	u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
+	u32 addr = nv_rd32(priv, 0x400704);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, 0x400708);
+	u32 class = nv_rd32(priv, 0x400814);
+	u32 show = stat;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000010) {
+		handle = nouveau_handle_get_class(engctx, class);
+		if (handle && !nv_call(handle->object, mthd, data))
+			show &= ~0x00000010;
+		nouveau_handle_put(handle);
+	}
+
+	if (show & 0x00100000) {
+		u32 ecode = nv_rd32(priv, 0x400110);
+		nv_error(priv, "DATA_ERROR ");
+		nouveau_enum_print(nv50_data_error_names, ecode);
+		printk("\n");
+	}
+
+	if (stat & 0x00200000) {
+		if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12))
+			show &= ~0x00200000;
+	}
+
+	nv_wr32(priv, 0x400100, stat);
+	nv_wr32(priv, 0x400500, 0x00010001);
+
+	if (show) {
+		nv_info(priv, "");
+		nouveau_bitfield_print(nv50_graph_intr_name, show);
+		printk("\n");
+		nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
+			       "mthd 0x%04x data 0x%08x\n",
+			 chid, (u64)inst << 12, subc, class, mthd, data);
+		nv50_fb_trap(nouveau_fb(priv), 1);
+	}
+
+	if (nv_rd32(priv, 0x400824) & (1 << 31))
+		nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00201000;
+	nv_subdev(priv)->intr = nv50_graph_intr;
+	nv_engine(priv)->cclass = &nv50_graph_cclass;
+
+	switch (nv_device(priv)->chipset) {
+	case 0x50:
+		nv_engine(priv)->sclass = nv50_graph_sclass;
+		break;
+	case 0x84:
+	case 0x86:
+	case 0x92:
+	case 0x94:
+	case 0x96:
+	case 0x98:
+		nv_engine(priv)->sclass = nv84_graph_sclass;
+		break;
+	case 0xa0:
+	case 0xaa:
+	case 0xac:
+		nv_engine(priv)->sclass = nva0_graph_sclass;
+		break;
+	case 0xa3:
+	case 0xa5:
+	case 0xa8:
+		nv_engine(priv)->sclass = nva3_graph_sclass;
+		break;
+	case 0xaf:
+		nv_engine(priv)->sclass = nvaf_graph_sclass;
+		break;
+	}
+
+	if (nv_device(priv)->chipset == 0x50 ||
+	    nv_device(priv)->chipset == 0xac)
+		nv_engine(priv)->tlb_flush = nv50_graph_tlb_flush;
+	else
+		nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush;
+
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static int
+nv50_graph_init(struct nouveau_object *object)
+{
+	struct nv50_graph_priv *priv = (void *)object;
+	int ret, units, i;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
+	nv_wr32(priv, 0x40008c, 0x00000004);
+
+	/* reset/enable traps and interrupts */
+	nv_wr32(priv, 0x400804, 0xc0000000);
+	nv_wr32(priv, 0x406800, 0xc0000000);
+	nv_wr32(priv, 0x400c04, 0xc0000000);
+	nv_wr32(priv, 0x401800, 0xc0000000);
+	nv_wr32(priv, 0x405018, 0xc0000000);
+	nv_wr32(priv, 0x402000, 0xc0000000);
+
+	units = nv_rd32(priv, 0x001540);
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+
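+		/* per-TP trap enables: pre-NVA0 chips space the TP register
+		 * blocks 0x1000 apart, NVA0 and later use a 0x800 stride
+		 * (same layout as nv50_priv_tp_trap() above) */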
+		if (nv_device(priv)->chipset < 0xa0) {
+			nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
+			nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
+			nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
+		} else {
+			nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
+			nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
+			nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
+		}
+	}
+
+	nv_wr32(priv, 0x400108, 0xffffffff);
+	nv_wr32(priv, 0x400138, 0xffffffff);
+	nv_wr32(priv, 0x400100, 0xffffffff);
+	nv_wr32(priv, 0x40013c, 0xffffffff);
+	nv_wr32(priv, 0x400500, 0x00010001);
+
+	/* upload context program, initialise ctxctl defaults */
+	ret = nv50_grctx_init(nv_device(priv), &priv->size);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x400824, 0x00000000);
+	nv_wr32(priv, 0x400828, 0x00000000);
+	nv_wr32(priv, 0x40082c, 0x00000000);
+	nv_wr32(priv, 0x400830, 0x00000000);
+	nv_wr32(priv, 0x400724, 0x00000000);
+	nv_wr32(priv, 0x40032c, 0x00000000);
+	nv_wr32(priv, 0x400320, 4);	/* CTXCTL_CMD = NEWCTXDMA */
+
+	/* some unknown zcull magic */
+	switch (nv_device(priv)->chipset & 0xf0) {
+	case 0x50:
+	case 0x80:
+	case 0x90:
+		nv_wr32(priv, 0x402ca8, 0x00000800);
+		break;
+	case 0xa0:
+	default:
+		nv_wr32(priv, 0x402cc0, 0x00000000);
+		if (nv_device(priv)->chipset == 0xa0 ||
+		    nv_device(priv)->chipset == 0xaa ||
+		    nv_device(priv)->chipset == 0xac) {
+			nv_wr32(priv, 0x402ca8, 0x00000802);
+		} else {
+			nv_wr32(priv, 0x402cc0, 0x00000000);
+			nv_wr32(priv, 0x402ca8, 0x00000002);
+		}
+
+		break;
+	}
+
+	/* zero out zcull regions */
+	for (i = 0; i < 8; i++) {
+		nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
+		nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
+		nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
+		nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
+	}
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_graph_ctor,
+		.dtor = _nouveau_graph_dtor,
+		.init = nv50_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
new file mode 100644
index 000000000000..0505fb419bde
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
@@ -0,0 +1,7 @@
+#ifndef __NV50_GRAPH_H__
+#define __NV50_GRAPH_H__
+
+int  nv50_grctx_init(struct nouveau_device *, u32 *size);
+void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
new file mode 100644
index 000000000000..c62f2d0f5f0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -0,0 +1,955 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+#include "fuc/hubnvc0.fuc.h"
+#include "fuc/gpcnvc0.fuc.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0x9039, &nouveau_object_ofuncs },
+	{ 0x9097, &nouveau_object_ofuncs },
+	{ 0x90c0, &nouveau_object_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nvc1_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0x9039, &nouveau_object_ofuncs },
+	{ 0x9097, &nouveau_object_ofuncs },
+	{ 0x90c0, &nouveau_object_ofuncs },
+	{ 0x9197, &nouveau_object_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nvc8_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0x9039, &nouveau_object_ofuncs },
+	{ 0x9097, &nouveau_object_ofuncs },
+	{ 0x90c0, &nouveau_object_ofuncs },
+	{ 0x9197, &nouveau_object_ofuncs },
+	{ 0x9297, &nouveau_object_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+int
+nvc0_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *args, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_vm *vm = nouveau_client(parent)->vm;
+	struct nvc0_graph_priv *priv = (void *)engine;
+	struct nvc0_graph_data *data = priv->mmio_data;
+	struct nvc0_graph_mmio *mmio = priv->mmio_list;
+	struct nvc0_graph_chan *chan;
+	int ret, i;
+
+	/* allocate memory for context, and fill with default values */
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+					   priv->size, 0x100,
+					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	/* allocate memory for a "mmio list" buffer that's used by the HUB
+	 * fuc to modify some per-context register settings on first load
+	 * of the context.
+	 */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
+				    NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+				    &chan->mmio_vma);
+	if (ret)
+		return ret;
+
+	/* allocate buffers referenced by mmio list */
+	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
+		ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align,
+					 0, &chan->data[i].mem);
+		if (ret)
+			return ret;
+
+		ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
+					   &chan->data[i].vma);
+		if (ret)
+			return ret;
+
+		data++;
+	}
+
+	/* finally, fill in the mmio list and point the context at it */
+	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
+		u32 addr = mmio->addr;
+		u32 data = mmio->data;
+
+		if (mmio->shift) {
+			u64 info = chan->data[mmio->buffer].vma.offset;
+			data |= info >> mmio->shift;
+		}
+
+		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
+		mmio++;
+	}
+
+	for (i = 0; i < priv->size; i += 4)
+		nv_wo32(chan, i, priv->data[i / 4]);
+
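+	/* context image header: with the internal ctxsw ucode the mmio list
+	 * count/address live at 0x00/0x04; external (NVIDIA) firmware takes
+	 * the count at 0x10 and a 64-bit address at 0x14/0x18, as written
+	 * below */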
+	if (!priv->firmware) {
+		nv_wo32(chan, 0x00, chan->mmio_nr / 2);
+		nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
+	} else {
+		nv_wo32(chan, 0xf4, 0);
+		nv_wo32(chan, 0xf8, 0);
+		nv_wo32(chan, 0x10, chan->mmio_nr / 2);
+		nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
+		nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
+		nv_wo32(chan, 0x1c, 1);
+		nv_wo32(chan, 0x20, 0);
+		nv_wo32(chan, 0x28, 0);
+		nv_wo32(chan, 0x2c, 0);
+	}
+
+	return 0;
+}
+
+void
+nvc0_graph_context_dtor(struct nouveau_object *object)
+{
+	struct nvc0_graph_chan *chan = (void *)object;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
+		nouveau_gpuobj_unmap(&chan->data[i].vma);
+		nouveau_gpuobj_ref(NULL, &chan->data[i].mem);
+	}
+
+	nouveau_gpuobj_unmap(&chan->mmio_vma);
+	nouveau_gpuobj_ref(NULL, &chan->mmio);
+
+	nouveau_graph_context_destroy(&chan->base);
+}
+
+static struct nouveau_oclass
+nvc0_graph_cclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_context_ctor,
+		.dtor = nvc0_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = _nouveau_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base)
+{
+	nv_error(priv, "%06x - done 0x%08x\n", base,
+		 nv_rd32(priv, base + 0x400));
+	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+		 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
+		 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
+	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+		 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
+		 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
+}
+
+void
+nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
+{
+	u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
+	u32 gpc;
+
+	nvc0_graph_ctxctl_debug_unit(priv, 0x409000);
+	for (gpc = 0; gpc < gpcnr; gpc++)
+		nvc0_graph_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
+}
+
+static void
+nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
+{
+	u32 ustat = nv_rd32(priv, 0x409c18);
+
+	if (ustat & 0x00000001)
+		nv_error(priv, "CTXCTRL ucode error\n");
+	if (ustat & 0x00080000)
+		nv_error(priv, "CTXCTRL watchdog timeout\n");
+	if (ustat & ~0x00080001)
+		nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
+
+	nvc0_graph_ctxctl_debug(priv);
+	nv_wr32(priv, 0x409c20, ustat);
+}
+
+static void
+nvc0_graph_trap_tpc(struct nvc0_graph_priv *priv, int gpc, int tpc)
+{
+	u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));
+
+	if (stat & 0x00000001) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
+		nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat & 0x00000002) {
+		u32 trap0 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0644));
+		u32 trap1 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x064c));
+		nv_error(priv, "GPC%d/TPC%d/MP: 0x%08x 0x%08x\n",
+			       gpc, tpc, trap0, trap1);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0644), 0x001ffffe);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x064c), 0x0000000f);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000002);
+		stat &= ~0x00000002;
+	}
+
+	if (stat & 0x00000004) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
+		nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000004);
+		stat &= ~0x00000004;
+	}
+
+	if (stat & 0x00000008) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
+		nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000008);
+		stat &= ~0x00000008;
+	}
+
+	if (stat) {
+		nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), stat);
+	}
+}
+
+static void
+nvc0_graph_trap_gpc(struct nvc0_graph_priv *priv, int gpc)
+{
+	u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+	int tpc;
+
+	if (stat & 0x00000001) {
+		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
+		nv_error(priv, "GPC%d/PROP: 0x%08x\n", gpc, trap);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat & 0x00000002) {
+		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
+		nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000002);
+		stat &= ~0x00000002;
+	}
+
+	if (stat & 0x00000004) {
+		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
+		nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000004);
+		stat &= ~0x00000004;
+	}
+
+	if (stat & 0x00000008) {
+		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
+		nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000008);
+		stat &= ~0x00000008;
+	}
+
+	for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+		u32 mask = 0x00010000 << tpc;
+		if (stat & mask) {
+			nvc0_graph_trap_tpc(priv, gpc, tpc);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
+			stat &= ~mask;
+		}
+	}
+
+	if (stat) {
+		nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), stat);
+	}
+}
+
+static void
+nvc0_graph_trap_intr(struct nvc0_graph_priv *priv)
+{
+	u32 trap = nv_rd32(priv, 0x400108);
+	int rop, gpc;
+
+	if (trap & 0x00000001) {
+		u32 stat = nv_rd32(priv, 0x404000);
+		nv_error(priv, "DISPATCH 0x%08x\n", stat);
+		nv_wr32(priv, 0x404000, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000001);
+		trap &= ~0x00000001;
+	}
+
+	if (trap & 0x00000002) {
+		u32 stat = nv_rd32(priv, 0x404600);
+		nv_error(priv, "M2MF 0x%08x\n", stat);
+		nv_wr32(priv, 0x404600, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000002);
+		trap &= ~0x00000002;
+	}
+
+	if (trap & 0x00000008) {
+		u32 stat = nv_rd32(priv, 0x408030);
+		nv_error(priv, "CCACHE 0x%08x\n", stat);
+		nv_wr32(priv, 0x408030, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000008);
+		trap &= ~0x00000008;
+	}
+
+	if (trap & 0x00000010) {
+		u32 stat = nv_rd32(priv, 0x405840);
+		nv_error(priv, "SHADER 0x%08x\n", stat);
+		nv_wr32(priv, 0x405840, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000010);
+		trap &= ~0x00000010;
+	}
+
+	if (trap & 0x00000040) {
+		u32 stat = nv_rd32(priv, 0x40601c);
+		nv_error(priv, "UNK6 0x%08x\n", stat);
+		nv_wr32(priv, 0x40601c, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000040);
+		trap &= ~0x00000040;
+	}
+
+	if (trap & 0x00000080) {
+		u32 stat = nv_rd32(priv, 0x404490);
+		nv_error(priv, "MACRO 0x%08x\n", stat);
+		nv_wr32(priv, 0x404490, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000080);
+		trap &= ~0x00000080;
+	}
+
+	if (trap & 0x01000000) {
+		u32 stat = nv_rd32(priv, 0x400118);
+		for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
+			u32 mask = 0x00000001 << gpc;
+			if (stat & mask) {
+				nvc0_graph_trap_gpc(priv, gpc);
+				nv_wr32(priv, 0x400118, mask);
+				stat &= ~mask;
+			}
+		}
+		nv_wr32(priv, 0x400108, 0x01000000);
+		trap &= ~0x01000000;
+	}
+
+	if (trap & 0x02000000) {
+		for (rop = 0; rop < priv->rop_nr; rop++) {
+			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
+			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
+			nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
+				 rop, statz, statc);
+			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+		}
+		nv_wr32(priv, 0x400108, 0x02000000);
+		trap &= ~0x02000000;
+	}
+
+	if (trap) {
+		nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
+		nv_wr32(priv, 0x400108, trap);
+	}
+}
+
+static void
+nvc0_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nvc0_graph_priv *priv = (void *)subdev;
+	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
+	u32 stat = nv_rd32(priv, 0x400100);
+	u32 addr = nv_rd32(priv, 0x400704);
+	u32 mthd = (addr & 0x00003ffc);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 data = nv_rd32(priv, 0x400708);
+	u32 code = nv_rd32(priv, 0x400110);
+	u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000010) {
+		handle = nouveau_handle_get_class(engctx, class);
+		if (!handle || nv_call(handle->object, mthd, data)) {
+			nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
+				     "subc %d class 0x%04x mthd 0x%04x "
+				     "data 0x%08x\n",
+				 chid, inst << 12, subc, class, mthd, data);
+		}
+		nouveau_handle_put(handle);
+		nv_wr32(priv, 0x400100, 0x00000010);
+		stat &= ~0x00000010;
+	}
+
+	if (stat & 0x00000020) {
+		nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, inst << 12, subc, class, mthd, data);
+		nv_wr32(priv, 0x400100, 0x00000020);
+		stat &= ~0x00000020;
+	}
+
+	if (stat & 0x00100000) {
+		nv_error(priv, "DATA_ERROR [");
+		nouveau_enum_print(nv50_data_error_names, code);
+		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+		       "mthd 0x%04x data 0x%08x\n",
+		       chid, inst << 12, subc, class, mthd, data);
+		nv_wr32(priv, 0x400100, 0x00100000);
+		stat &= ~0x00100000;
+	}
+
+	if (stat & 0x00200000) {
+		nv_error(priv, "TRAP ch %d [0x%010llx]\n", chid, inst << 12);
+		nvc0_graph_trap_intr(priv);
+		nv_wr32(priv, 0x400100, 0x00200000);
+		stat &= ~0x00200000;
+	}
+
+	if (stat & 0x00080000) {
+		nvc0_graph_ctxctl_isr(priv);
+		nv_wr32(priv, 0x400100, 0x00080000);
+		stat &= ~0x00080000;
+	}
+
+	if (stat) {
+		nv_error(priv, "unknown stat 0x%08x\n", stat);
+		nv_wr32(priv, 0x400100, stat);
+	}
+
+	nv_wr32(priv, 0x400500, 0x00010001);
+	nouveau_engctx_put(engctx);
+}
+
+int
+nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
+		   struct nvc0_graph_fuc *fuc)
+{
+	struct nouveau_device *device = nv_device(priv);
+	const struct firmware *fw;
+	char f[32];
+	int ret;
+
+	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
+	ret = request_firmware(&fw, f, &device->pdev->dev);
+	if (ret) {
+		snprintf(f, sizeof(f), "nouveau/%s", fwname);
+		ret = request_firmware(&fw, f, &device->pdev->dev);
+		if (ret) {
+			nv_error(priv, "failed to load %s\n", fwname);
+			return ret;
+		}
+	}
+
+	fuc->size = fw->size;
+	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+	release_firmware(fw);
+	return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+static int
+nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nvc0_graph_priv *priv;
+	bool enable = true;
+	int ret, i;
+
+	switch (device->chipset) {
+	case 0xd9: /* known broken without binary driver firmware */
+		enable = false;
+		break;
+	default:
+		break;
+	}
+
+	ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x18001000;
+	nv_subdev(priv)->intr = nvc0_graph_intr;
+	nv_engine(priv)->cclass = &nvc0_graph_cclass;
+
+	if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
+		nv_info(priv, "using external firmware\n");
+		if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
+		    nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
+		    nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
+		    nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
+			return -EINVAL;
+		priv->firmware = true;
+	}
+
+	switch (nvc0_graph_class(priv)) {
+	case 0x9097:
+		nv_engine(priv)->sclass = nvc0_graph_sclass;
+		break;
+	case 0x9197:
+		nv_engine(priv)->sclass = nvc1_graph_sclass;
+		break;
+	case 0x9297:
+		nv_engine(priv)->sclass = nvc8_graph_sclass;
+		break;
+	}
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 0x1000; i += 4) {
+		nv_wo32(priv->unk4188b4, i, 0x00000010);
+		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	}
+
+	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
+	priv->gpc_nr =  nv_rd32(priv, 0x409604) & 0x0000001f;
+	for (i = 0; i < priv->gpc_nr; i++) {
+		priv->tpc_nr[i]  = nv_rd32(priv, GPC_UNIT(i, 0x2608));
+		priv->tpc_total += priv->tpc_nr[i];
+	}
+
+	/*XXX: these need figuring out... though it might not even matter */
+	switch (nv_device(priv)->chipset) {
+	case 0xc0:
+		if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
+			priv->magic_not_rop_nr = 0x07;
+		} else
+		if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
+			priv->magic_not_rop_nr = 0x05;
+		} else
+		if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
+			priv->magic_not_rop_nr = 0x06;
+		}
+		break;
+	case 0xc3: /* 450, 4/0/0/0, 2 */
+		priv->magic_not_rop_nr = 0x03;
+		break;
+	case 0xc4: /* 460, 3/4/0/0, 4 */
+		priv->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc1: /* 2/0/0/0, 1 */
+		priv->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc8: /* 4/4/3/4, 5 */
+		priv->magic_not_rop_nr = 0x06;
+		break;
+	case 0xce: /* 4/4/0/0, 4 */
+		priv->magic_not_rop_nr = 0x03;
+		break;
+	case 0xcf: /* 4/0/0/0, 3 */
+		priv->magic_not_rop_nr = 0x03;
+		break;
+	case 0xd9: /* 1/0/0/0, 1 */
+		priv->magic_not_rop_nr = 0x01;
+		break;
+	}
+
+	return 0;
+}
+
+static void
+nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
+{
+	if (fuc->data) {
+		kfree(fuc->data);
+		fuc->data = NULL;
+	}
+}
+
+void
+nvc0_graph_dtor(struct nouveau_object *object)
+{
+	struct nvc0_graph_priv *priv = (void *)object;
+
+	kfree(priv->data);
+
+	nvc0_graph_dtor_fw(&priv->fuc409c);
+	nvc0_graph_dtor_fw(&priv->fuc409d);
+	nvc0_graph_dtor_fw(&priv->fuc41ac);
+	nvc0_graph_dtor_fw(&priv->fuc41ad);
+
+	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+	nouveau_graph_destroy(&priv->base);
+}
+
+static void
+nvc0_graph_init_obj418880(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
+	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+}
+
+static void
+nvc0_graph_init_regs(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x400080, 0x003083c2);
+	nv_wr32(priv, 0x400088, 0x00006fe7);
+	nv_wr32(priv, 0x40008c, 0x00000000);
+	nv_wr32(priv, 0x400090, 0x00000030);
+	nv_wr32(priv, 0x40013c, 0x013901f7);
+	nv_wr32(priv, 0x400140, 0x00000100);
+	nv_wr32(priv, 0x400144, 0x00000000);
+	nv_wr32(priv, 0x400148, 0x00000110);
+	nv_wr32(priv, 0x400138, 0x00000000);
+	nv_wr32(priv, 0x400130, 0x00000000);
+	nv_wr32(priv, 0x400134, 0x00000000);
+	nv_wr32(priv, 0x400124, 0x00000002);
+}
+
+static void
+nvc0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
+{
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
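+	/* magicgpc918 == ceil(2^23 / tpc_total); likely a fixed-point
+	 * per-TPC fraction, broadcast to GPC 0x0918 and 0x1bd4 below */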
+	u32 data[TPC_MAX / 8];
+	u8  tpcnr[GPC_MAX];
+	int i, gpc, tpc;
+
+	nv_wr32(priv, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
+	/*
+	 *      TP      ROP UNKVAL(magic_not_rop_nr)
+	 * 450: 4/0/0/0 2        3
+	 * 460: 3/4/0/0 4        1
+	 * 465: 3/4/4/0 4        7
+	 * 470: 3/3/4/4 5        5
+	 * 480: 3/4/4/4 6        6
+	 */
+
+	memset(data, 0x00, sizeof(data));
+	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
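+	/* distribute TPCs round-robin across GPCs, packing each TPC's index
+	 * within its GPC as a 4-bit nibble (eight nibbles per 32-bit word) */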
+	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % priv->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+						  priv->tpc_nr[gpc]);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+}
+
+static void
+nvc0_graph_init_units(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x409c24, 0x000f0000);
+	nv_wr32(priv, 0x404000, 0xc0000000); /* DISPATCH */
+	nv_wr32(priv, 0x404600, 0xc0000000); /* M2MF */
+	nv_wr32(priv, 0x408030, 0xc0000000);
+	nv_wr32(priv, 0x40601c, 0xc0000000);
+	nv_wr32(priv, 0x404490, 0xc0000000); /* MACRO */
+	nv_wr32(priv, 0x406018, 0xc0000000);
+	nv_wr32(priv, 0x405840, 0xc0000000);
+	nv_wr32(priv, 0x405844, 0x00ffffff);
+	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nvc0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
+{
+	int gpc, tpc;
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+		}
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+	}
+}
+
+static void
+nvc0_graph_init_rop(struct nvc0_graph_priv *priv)
+{
+	int rop;
+
+	for (rop = 0; rop < priv->rop_nr; rop++) {
+		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	}
+}
+
+void
+nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
+		   struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
+{
+	int i;
+
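+	/* upload the data segment through what appears to be the falcon's
+	 * auto-incrementing port, then the code segment, selecting a new
+	 * 256-byte block every 64 words */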
+	nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
+	for (i = 0; i < data->size / 4; i++)
+		nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
+
+	nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
+	for (i = 0; i < code->size / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, fuc_base + 0x0188, i >> 6);
+		nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
+	}
+}
+
+static int
+nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
+{
+	u32 r000260;
+	int i;
+
+	if (priv->firmware) {
+		/* load fuc microcode */
+		r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+		nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
+						   &priv->fuc409d);
+		nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
+						   &priv->fuc41ad);
+		nv_wr32(priv, 0x000260, r000260);
+
+		/* start both of them running */
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x41a10c, 0x00000000);
+		nv_wr32(priv, 0x40910c, 0x00000000);
+		nv_wr32(priv, 0x41a100, 0x00000002);
+		nv_wr32(priv, 0x409100, 0x00000002);
+		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
+			nv_info(priv, "0x409800 wait failed\n");
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x7fffffff);
+		nv_wr32(priv, 0x409504, 0x00000021);
+
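+		/* request 0x10: the ucode appears to report the required
+		 * context image size in 0x409800, read into priv->size below */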
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000010);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x10 timeout\n");
+			return -EBUSY;
+		}
+		priv->size = nv_rd32(priv, 0x409800);
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000016);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x16 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000025);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x25 timeout\n");
+			return -EBUSY;
+		}
+
+		if (priv->data == NULL) {
+			int ret = nvc0_grctx_generate(priv);
+			if (ret) {
+				nv_error(priv, "failed to construct context\n");
+				return ret;
+			}
+		}
+
+		return 0;
+	}
+
+	/* load HUB microcode */
+	r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x4091c0, 0x01000000);
+	for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
+		nv_wr32(priv, 0x4091c4, nvc0_grhub_data[i]);
+
+	nv_wr32(priv, 0x409180, 0x01000000);
+	for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x409188, i >> 6);
+		nv_wr32(priv, 0x409184, nvc0_grhub_code[i]);
+	}
+
+	/* load GPC microcode */
+	nv_wr32(priv, 0x41a1c0, 0x01000000);
+	for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
+		nv_wr32(priv, 0x41a1c4, nvc0_grgpc_data[i]);
+
+	nv_wr32(priv, 0x41a180, 0x01000000);
+	for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x41a188, i >> 6);
+		nv_wr32(priv, 0x41a184, nvc0_grgpc_code[i]);
+	}
+	nv_wr32(priv, 0x000260, r000260);
+
+	/* start HUB ucode running, it'll init the GPCs */
+	nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
+	nv_wr32(priv, 0x40910c, 0x00000000);
+	nv_wr32(priv, 0x409100, 0x00000002);
+	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+		nv_error(priv, "HUB_INIT timed out\n");
+		nvc0_graph_ctxctl_debug(priv);
+		return -EBUSY;
+	}
+
+	priv->size = nv_rd32(priv, 0x409804);
+	if (priv->data == NULL) {
+		int ret = nvc0_grctx_generate(priv);
+		if (ret) {
+			nv_error(priv, "failed to construct context\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nvc0_graph_init(struct nouveau_object *object)
+{
+	struct nvc0_graph_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nvc0_graph_init_obj418880(priv);
+	nvc0_graph_init_regs(priv);
+	/*nvc0_graph_init_unimplemented_magics(priv);*/
+	nvc0_graph_init_gpc_0(priv);
+	/*nvc0_graph_init_unimplemented_c242(priv);*/
+
+	nv_wr32(priv, 0x400500, 0x00010001);
+	nv_wr32(priv, 0x400100, 0xffffffff);
+	nv_wr32(priv, 0x40013c, 0xffffffff);
+
+	nvc0_graph_init_units(priv);
+	nvc0_graph_init_gpc_1(priv);
+	nvc0_graph_init_rop(priv);
+
+	nv_wr32(priv, 0x400108, 0xffffffff);
+	nv_wr32(priv, 0x400138, 0xffffffff);
+	nv_wr32(priv, 0x400118, 0xffffffff);
+	nv_wr32(priv, 0x400130, 0xffffffff);
+	nv_wr32(priv, 0x40011c, 0xffffffff);
+	nv_wr32(priv, 0x400134, 0xffffffff);
+	nv_wr32(priv, 0x400054, 0x34ce3464);
+
+	ret = nvc0_graph_init_ctxctl(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_ctor,
+		.dtor = nvc0_graph_dtor,
+		.init = nvc0_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
new file mode 100644
index 000000000000..18d2210e12eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+#include <core/option.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#define GPC_MAX 4
+#define TPC_MAX 32
+
+#define ROP_BCAST(r)      (0x408800 + (r))
+#define ROP_UNIT(u, r)    (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r)      (0x418000 + (r))
+#define GPC_UNIT(t, r)    (0x500000 + (t) * 0x8000 + (r))
+#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nvc0_graph_data {
+	u32 size;
+	u32 align;
+	u32 access;
+};
+
+struct nvc0_graph_mmio {
+	u32 addr;
+	u32 data;
+	u32 shift;
+	u32 buffer;
+};
+
+struct nvc0_graph_fuc {
+	u32 *data;
+	u32  size;
+};
+
+struct nvc0_graph_priv {
+	struct nouveau_graph base;
+
+	struct nvc0_graph_fuc fuc409c;
+	struct nvc0_graph_fuc fuc409d;
+	struct nvc0_graph_fuc fuc41ac;
+	struct nvc0_graph_fuc fuc41ad;
+	bool firmware;
+
+	u8 rop_nr;
+	u8 gpc_nr;
+	u8 tpc_nr[GPC_MAX];
+	u8 tpc_total;
+
+	struct nouveau_gpuobj *unk4188b4;
+	struct nouveau_gpuobj *unk4188b8;
+
+	struct nvc0_graph_data mmio_data[4];
+	struct nvc0_graph_mmio mmio_list[4096/8];
+	u32  size;
+	u32 *data;
+
+	u8 magic_not_rop_nr;
+};
+
+struct nvc0_graph_chan {
+	struct nouveau_graph_chan base;
+
+	struct nouveau_gpuobj *mmio;
+	struct nouveau_vma mmio_vma;
+	int mmio_nr;
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_vma vma;
+	} data[4];
+};
+
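+/* returns the most capable 3D object class for the chipset (0 if unknown);
+ * used by the graph ctors to select an sclass table */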
+static inline u32
+nvc0_graph_class(void *obj)
+{
+	struct nouveau_device *device = nv_device(obj);
+
+	switch (device->chipset) {
+	case 0xc0:
+	case 0xc3:
+	case 0xc4:
+	case 0xce: /* guess, mmio trace shows only 0x9097 state */
+	case 0xcf: /* guess, mmio trace shows only 0x9097 state */
+		return 0x9097;
+	case 0xc1:
+		return 0x9197;
+	case 0xc8:
+	case 0xd9:
+		return 0x9297;
+	case 0xe4:
+	case 0xe7:
+		return 0xa097;
+	default:
+		return 0;
+	}
+}
+
+void nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data);
+
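+/* inject a single method into PGRAPH: data word first, then class/method
+ * with bit 31 acting as the trigger (interpretation of the register pair) */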
+static inline void
+nv_mthd(struct nvc0_graph_priv *priv, u32 class, u32 mthd, u32 data)
+{
+	nv_wr32(priv, 0x40448c, data);
+	nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
+struct nvc0_grctx {
+	struct nvc0_graph_priv *priv;
+	struct nvc0_graph_data *data;
+	struct nvc0_graph_mmio *mmio;
+	struct nouveau_gpuobj *chan;
+	int buffer_nr;
+	u64 buffer[4];
+	u64 addr;
+};
+
+int  nvc0_grctx_generate(struct nvc0_graph_priv *);
+int  nvc0_grctx_init(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32);
+void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32);
+int  nvc0_grctx_fini(struct nvc0_grctx *);
+
+int  nve0_grctx_generate(struct nvc0_graph_priv *);
+
+#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p))
+#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b))
+
+void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *);
+int  nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *,
+			struct nvc0_graph_fuc *);
+void nvc0_graph_dtor(struct nouveau_object *);
+void nvc0_graph_init_fw(struct nvc0_graph_priv *, u32 base,
+			struct nvc0_graph_fuc *, struct nvc0_graph_fuc *);
+int  nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
+			     struct nouveau_oclass *, void *, u32,
+			     struct nouveau_object **);
+void nvc0_graph_context_dtor(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
new file mode 100644
index 000000000000..539d4c72f192
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+#include "fuc/hubnve0.fuc.h"
+#include "fuc/gpcnve0.fuc.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0xa040, &nouveau_object_ofuncs },
+	{ 0xa097, &nouveau_object_ofuncs },
+	{ 0xa0c0, &nouveau_object_ofuncs },
+	{ 0xa0b5, &nouveau_object_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_context_ctor,
+		.dtor = nvc0_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = _nouveau_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
+{
+	u32 ustat = nv_rd32(priv, 0x409c18);
+
+	if (ustat & 0x00000001)
+		nv_error(priv, "CTXCTRL ucode error\n");
+	if (ustat & 0x00080000)
+		nv_error(priv, "CTXCTRL watchdog timeout\n");
+	if (ustat & ~0x00080001)
+		nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
+
+	nvc0_graph_ctxctl_debug(priv);
+	nv_wr32(priv, 0x409c20, ustat);
+}
+
+static void
+nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
+{
+	u32 trap = nv_rd32(priv, 0x400108);
+	int rop;
+
+	if (trap & 0x00000001) {
+		u32 stat = nv_rd32(priv, 0x404000);
+		nv_error(priv, "DISPATCH ch %d [0x%010llx] 0x%08x\n",
+			 chid, inst, stat);
+		nv_wr32(priv, 0x404000, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000001);
+		trap &= ~0x00000001;
+	}
+
+	if (trap & 0x00000010) {
+		u32 stat = nv_rd32(priv, 0x405840);
+		nv_error(priv, "SHADER ch %d [0x%010llx] 0x%08x\n",
+			 chid, inst, stat);
+		nv_wr32(priv, 0x405840, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000010);
+		trap &= ~0x00000010;
+	}
+
+	if (trap & 0x02000000) {
+		for (rop = 0; rop < priv->rop_nr; rop++) {
+			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
+			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
+			nv_error(priv, "ROP%d ch %d [0x%010llx] 0x%08x 0x%08x\n",
+				 rop, chid, inst, statz, statc);
+			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+		}
+		nv_wr32(priv, 0x400108, 0x02000000);
+		trap &= ~0x02000000;
+	}
+
+	if (trap) {
+		nv_error(priv, "TRAP ch %d [0x%010llx] 0x%08x\n",
+			 chid, inst, trap);
+		nv_wr32(priv, 0x400108, trap);
+	}
+}
+
+static void
+nve0_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nvc0_graph_priv *priv = (void *)subdev;
+	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
+	u32 stat = nv_rd32(priv, 0x400100);
+	u32 addr = nv_rd32(priv, 0x400704);
+	u32 mthd = (addr & 0x00003ffc);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 data = nv_rd32(priv, 0x400708);
+	u32 code = nv_rd32(priv, 0x400110);
+	u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000010) {
+		handle = nouveau_handle_get_class(engctx, class);
+		if (!handle || nv_call(handle->object, mthd, data)) {
+			nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
+				     "subc %d class 0x%04x mthd 0x%04x "
+				     "data 0x%08x\n",
+				 chid, inst, subc, class, mthd, data);
+		}
+		nouveau_handle_put(handle);
+		nv_wr32(priv, 0x400100, 0x00000010);
+		stat &= ~0x00000010;
+	}
+
+	if (stat & 0x00000020) {
+		nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+			 chid, inst, subc, class, mthd, data);
+		nv_wr32(priv, 0x400100, 0x00000020);
+		stat &= ~0x00000020;
+	}
+
+	if (stat & 0x00100000) {
+		nv_error(priv, "DATA_ERROR [");
+		nouveau_enum_print(nv50_data_error_names, code);
+		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+		       "mthd 0x%04x data 0x%08x\n",
+		       chid, inst, subc, class, mthd, data);
+		nv_wr32(priv, 0x400100, 0x00100000);
+		stat &= ~0x00100000;
+	}
+
+	if (stat & 0x00200000) {
+		nve0_graph_trap_isr(priv, chid, inst);
+		nv_wr32(priv, 0x400100, 0x00200000);
+		stat &= ~0x00200000;
+	}
+
+	if (stat & 0x00080000) {
+		nve0_graph_ctxctl_isr(priv);
+		nv_wr32(priv, 0x400100, 0x00080000);
+		stat &= ~0x00080000;
+	}
+
+	if (stat) {
+		nv_error(priv, "unknown stat 0x%08x\n", stat);
+		nv_wr32(priv, 0x400100, stat);
+	}
+
+	nv_wr32(priv, 0x400500, 0x00010001);
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nvc0_graph_priv *priv;
+	int ret, i;
+
+	ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x18001000;
+	nv_subdev(priv)->intr = nve0_graph_intr;
+	nv_engine(priv)->cclass = &nve0_graph_cclass;
+	nv_engine(priv)->sclass = nve0_graph_sclass;
+
+	if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
+		nv_info(priv, "using external firmware\n");
+		if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
+		    nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
+		    nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
+		    nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
+			return -EINVAL;
+		priv->firmware = true;
+	}
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 0x1000; i += 4) {
+		nv_wo32(priv->unk4188b4, i, 0x00000010);
+		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	}
+
+	priv->gpc_nr =  nv_rd32(priv, 0x409604) & 0x0000001f;
+	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
+	for (i = 0; i < priv->gpc_nr; i++) {
+		priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
+		priv->tpc_total += priv->tpc_nr[i];
+	}
+
+	switch (nv_device(priv)->chipset) {
+	case 0xe4:
+		if (priv->tpc_total == 8)
+			priv->magic_not_rop_nr = 3;
+		else
+		if (priv->tpc_total == 7)
+			priv->magic_not_rop_nr = 1;
+		break;
+	case 0xe7:
+		priv->magic_not_rop_nr = 1;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void
+nve0_graph_init_obj418880(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
+	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+}
+
+static void
+nve0_graph_init_regs(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x400080, 0x003083c2);
+	nv_wr32(priv, 0x400088, 0x0001ffe7);
+	nv_wr32(priv, 0x40008c, 0x00000000);
+	nv_wr32(priv, 0x400090, 0x00000030);
+	nv_wr32(priv, 0x40013c, 0x003901f7);
+	nv_wr32(priv, 0x400140, 0x00000100);
+	nv_wr32(priv, 0x400144, 0x00000000);
+	nv_wr32(priv, 0x400148, 0x00000110);
+	nv_wr32(priv, 0x400138, 0x00000000);
+	nv_wr32(priv, 0x400130, 0x00000000);
+	nv_wr32(priv, 0x400134, 0x00000000);
+	nv_wr32(priv, 0x400124, 0x00000002);
+}
+
+static void
+nve0_graph_init_units(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x409ffc, 0x00000000);
+	nv_wr32(priv, 0x409c14, 0x00003e3e);
+	nv_wr32(priv, 0x409c24, 0x000f0000);
+
+	nv_wr32(priv, 0x404000, 0xc0000000);
+	nv_wr32(priv, 0x404600, 0xc0000000);
+	nv_wr32(priv, 0x408030, 0xc0000000);
+	nv_wr32(priv, 0x404490, 0xc0000000);
+	nv_wr32(priv, 0x406018, 0xc0000000);
+	nv_wr32(priv, 0x407020, 0xc0000000);
+	nv_wr32(priv, 0x405840, 0xc0000000);
+	nv_wr32(priv, 0x405844, 0x00ffffff);
+
+	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
+{
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	u32 data[TPC_MAX / 8];
+	u8  tpcnr[GPC_MAX];
+	int i, gpc, tpc;
+
+	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+
+	memset(data, 0x00, sizeof(data));
+	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
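+	/* same round-robin TPC-per-GPC nibble packing as in
+	 * nvc0_graph_init_gpc_0() */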
+	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % priv->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+						  priv->tpc_nr[gpc]);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+}
+
+static void
+nve0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
+{
+	int gpc, tpc;
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+		}
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+	}
+}
+
+static void
+nve0_graph_init_rop(struct nvc0_graph_priv *priv)
+{
+	int rop;
+
+	for (rop = 0; rop < priv->rop_nr; rop++) {
+		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	}
+}
+
+static int
+nve0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
+{
+	u32 r000260;
+	int i;
+
+	if (priv->firmware) {
+		/* load fuc microcode */
+		r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+		nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d);
+		nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
+		nv_wr32(priv, 0x000260, r000260);
+
+		/* start both of them running */
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x41a10c, 0x00000000);
+		nv_wr32(priv, 0x40910c, 0x00000000);
+		nv_wr32(priv, 0x41a100, 0x00000002);
+		nv_wr32(priv, 0x409100, 0x00000002);
+		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
+			nv_error(priv, "0x409800 wait failed\n");
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x7fffffff);
+		nv_wr32(priv, 0x409504, 0x00000021);
+
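+		/* fuc09 request sequence: write the argument to 0x409500 (and
+		 * 0x409810 where used), the request id to 0x409504, then poll
+		 * 0x409800 for a reply */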
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000010);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x10 timeout\n");
+			return -EBUSY;
+		}
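+		/* request 0x10 replies with the required context image size */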
+		priv->size = nv_rd32(priv, 0x409800);
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000016);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x16 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000025);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x25 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409800, 0x00000000);
+		nv_wr32(priv, 0x409500, 0x00000001);
+		nv_wr32(priv, 0x409504, 0x00000030);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x30 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409810, 0xb00095c8);
+		nv_wr32(priv, 0x409800, 0x00000000);
+		nv_wr32(priv, 0x409500, 0x00000001);
+		nv_wr32(priv, 0x409504, 0x00000031);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x31 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409810, 0x00080420);
+		nv_wr32(priv, 0x409800, 0x00000000);
+		nv_wr32(priv, 0x409500, 0x00000001);
+		nv_wr32(priv, 0x409504, 0x00000032);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x32 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409614, 0x00000070);
+		nv_wr32(priv, 0x409614, 0x00000770);
+		nv_wr32(priv, 0x40802c, 0x00000001);
+
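+		/* no saved context image yet: generate the initial ("golden")
+		 * context that new channel contexts are built from */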
+		if (priv->data == NULL) {
+			int ret = nve0_grctx_generate(priv);
+			if (ret) {
+				nv_error(priv, "failed to construct context\n");
+				return ret;
+			}
+		}
+
+		return 0;
+	}
+
+	/* load HUB microcode */
+	r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x4091c0, 0x01000000);
+	for (i = 0; i < sizeof(nve0_grhub_data) / 4; i++)
+		nv_wr32(priv, 0x4091c4, nve0_grhub_data[i]);
+
+	nv_wr32(priv, 0x409180, 0x01000000);
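+	/* 0x409188 selects the 0x100-byte code page (i >> 6); the data port
+	 * presumably auto-increments within a page */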
+	for (i = 0; i < sizeof(nve0_grhub_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x409188, i >> 6);
+		nv_wr32(priv, 0x409184, nve0_grhub_code[i]);
+	}
+
+	/* load GPC microcode */
+	nv_wr32(priv, 0x41a1c0, 0x01000000);
+	for (i = 0; i < sizeof(nve0_grgpc_data) / 4; i++)
+		nv_wr32(priv, 0x41a1c4, nve0_grgpc_data[i]);
+
+	nv_wr32(priv, 0x41a180, 0x01000000);
+	for (i = 0; i < sizeof(nve0_grgpc_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x41a188, i >> 6);
+		nv_wr32(priv, 0x41a184, nve0_grgpc_code[i]);
+	}
+	nv_wr32(priv, 0x000260, r000260);
+
+	/* start HUB ucode running, it'll init the GPCs */
+	nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
+	nv_wr32(priv, 0x40910c, 0x00000000);
+	nv_wr32(priv, 0x409100, 0x00000002);
+	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+		nv_error(priv, "HUB_INIT timed out\n");
+		nvc0_graph_ctxctl_debug(priv);
+		return -EBUSY;
+	}
+
+	priv->size = nv_rd32(priv, 0x409804);
+	if (priv->data == NULL) {
+		int ret = nve0_grctx_generate(priv);
+		if (ret) {
+			nv_error(priv, "failed to construct context\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nve0_graph_init(struct nouveau_object *object)
+{
+	struct nvc0_graph_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nve0_graph_init_obj418880(priv);
+	nve0_graph_init_regs(priv);
+	nve0_graph_init_gpc_0(priv);
+
+	nv_wr32(priv, 0x400500, 0x00010001);
+	nv_wr32(priv, 0x400100, 0xffffffff);
+	nv_wr32(priv, 0x40013c, 0xffffffff);
+
+	nve0_graph_init_units(priv);
+	nve0_graph_init_gpc_1(priv);
+	nve0_graph_init_rop(priv);
+
+	nv_wr32(priv, 0x400108, 0xffffffff);
+	nv_wr32(priv, 0x400138, 0xffffffff);
+	nv_wr32(priv, 0x400118, 0xffffffff);
+	nv_wr32(priv, 0x400130, 0xffffffff);
+	nv_wr32(priv, 0x40011c, 0xffffffff);
+	nv_wr32(priv, 0x400134, 0xffffffff);
+	nv_wr32(priv, 0x400054, 0x34ce3464);
+
+	ret = nve0_graph_init_ctxctl(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_graph_ctor,
+		.dtor = nvc0_graph_dtor,
+		.init = nve0_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
new file mode 100644
index 000000000000..9c715a25cecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -0,0 +1,269 @@
+#ifndef __NOUVEAU_GRAPH_REGS_H__
+#define __NOUVEAU_GRAPH_REGS_H__
+
+#define NV04_PGRAPH_DEBUG_0                                0x00400080
+#define NV04_PGRAPH_DEBUG_1                                0x00400084
+#define NV04_PGRAPH_DEBUG_2                                0x00400088
+#define NV04_PGRAPH_DEBUG_3                                0x0040008c
+#define NV10_PGRAPH_DEBUG_4                                0x00400090
+#define NV03_PGRAPH_INTR                                   0x00400100
+#define NV03_PGRAPH_NSTATUS                                0x00400104
+#    define NV04_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<11)
+#    define NV04_PGRAPH_NSTATUS_INVALID_STATE                 (1<<12)
+#    define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<13)
+#    define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<14)
+#    define NV10_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<23)
+#    define NV10_PGRAPH_NSTATUS_INVALID_STATE                 (1<<24)
+#    define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<25)
+#    define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<26)
+#define NV03_PGRAPH_NSOURCE                                0x00400108
+#    define NV03_PGRAPH_NSOURCE_NOTIFICATION                   (1<<0)
+#    define NV03_PGRAPH_NSOURCE_DATA_ERROR                     (1<<1)
+#    define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR               (1<<2)
+#    define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION                (1<<3)
+#    define NV03_PGRAPH_NSOURCE_LIMIT_COLOR                    (1<<4)
+#    define NV03_PGRAPH_NSOURCE_LIMIT_ZETA                     (1<<5)
+#    define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD                   (1<<6)
+#    define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION               (1<<7)
+#    define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION               (1<<8)
+#    define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION               (1<<9)
+#    define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION               (1<<10)
+#    define NV03_PGRAPH_NSOURCE_STATE_INVALID                 (1<<11)
+#    define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY                 (1<<12)
+#    define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE                 (1<<13)
+#    define NV03_PGRAPH_NSOURCE_METHOD_CNT                    (1<<14)
+#    define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION              (1<<15)
+#    define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION            (1<<16)
+#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A                   (1<<17)
+#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B                   (1<<18)
+#define NV03_PGRAPH_INTR_EN                                0x00400140
+#define NV40_PGRAPH_INTR_EN                                0x0040013C
+#    define NV_PGRAPH_INTR_NOTIFY                              (1<<0)
+#    define NV_PGRAPH_INTR_MISSING_HW                          (1<<4)
+#    define NV_PGRAPH_INTR_CONTEXT_SWITCH                     (1<<12)
+#    define NV_PGRAPH_INTR_BUFFER_NOTIFY                      (1<<16)
+#    define NV_PGRAPH_INTR_ERROR                              (1<<20)
+#define NV10_PGRAPH_CTX_CONTROL                            0x00400144
+#define NV10_PGRAPH_CTX_USER                               0x00400148
+#define NV10_PGRAPH_CTX_SWITCH(i)                         (0x0040014C + 0x4*(i))
+#define NV04_PGRAPH_CTX_SWITCH1                            0x00400160
+#define NV10_PGRAPH_CTX_CACHE(i, j)                       (0x00400160	\
+							   + 0x4*(i) + 0x20*(j))
+#define NV04_PGRAPH_CTX_SWITCH2                            0x00400164
+#define NV04_PGRAPH_CTX_SWITCH3                            0x00400168
+#define NV04_PGRAPH_CTX_SWITCH4                            0x0040016C
+#define NV04_PGRAPH_CTX_CONTROL                            0x00400170
+#define NV04_PGRAPH_CTX_USER                               0x00400174
+#define NV04_PGRAPH_CTX_CACHE1                             0x00400180
+#define NV03_PGRAPH_CTX_CONTROL                            0x00400190
+#define NV03_PGRAPH_CTX_USER                               0x00400194
+#define NV04_PGRAPH_CTX_CACHE2                             0x004001A0
+#define NV04_PGRAPH_CTX_CACHE3                             0x004001C0
+#define NV04_PGRAPH_CTX_CACHE4                             0x004001E0
+#define NV40_PGRAPH_CTXCTL_0304                            0x00400304
+#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX                   0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT                      0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK              0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT                     24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK              0x00ffffff
+#define NV40_PGRAPH_CTXCTL_0310                            0x00400310
+#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE                  0x00000020
+#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD                  0x00000040
+#define NV40_PGRAPH_CTXCTL_030C                            0x0040030c
+#define NV40_PGRAPH_CTXCTL_UCODE_INDEX                     0x00400324
+#define NV40_PGRAPH_CTXCTL_UCODE_DATA                      0x00400328
+#define NV40_PGRAPH_CTXCTL_CUR                             0x0040032c
+#define NV40_PGRAPH_CTXCTL_CUR_LOADED                      0x01000000
+#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE                    0x000FFFFF
+#define NV40_PGRAPH_CTXCTL_NEXT                            0x00400330
+#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x000fffff
+#define NV50_PGRAPH_CTXCTL_CUR                             0x0040032c
+#define NV50_PGRAPH_CTXCTL_CUR_LOADED                      0x80000000
+#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE                    0x00ffffff
+#define NV50_PGRAPH_CTXCTL_NEXT                            0x00400330
+#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x00ffffff
+#define NV03_PGRAPH_ABS_X_RAM                              0x00400400
+#define NV03_PGRAPH_ABS_Y_RAM                              0x00400480
+#define NV03_PGRAPH_X_MISC                                 0x00400500
+#define NV03_PGRAPH_Y_MISC                                 0x00400504
+#define NV04_PGRAPH_VALID1                                 0x00400508
+#define NV04_PGRAPH_SOURCE_COLOR                           0x0040050C
+#define NV04_PGRAPH_MISC24_0                               0x00400510
+#define NV03_PGRAPH_XY_LOGIC_MISC0                         0x00400514
+#define NV03_PGRAPH_XY_LOGIC_MISC1                         0x00400518
+#define NV03_PGRAPH_XY_LOGIC_MISC2                         0x0040051C
+#define NV03_PGRAPH_XY_LOGIC_MISC3                         0x00400520
+#define NV03_PGRAPH_CLIPX_0                                0x00400524
+#define NV03_PGRAPH_CLIPX_1                                0x00400528
+#define NV03_PGRAPH_CLIPY_0                                0x0040052C
+#define NV03_PGRAPH_CLIPY_1                                0x00400530
+#define NV03_PGRAPH_ABS_ICLIP_XMAX                         0x00400534
+#define NV03_PGRAPH_ABS_ICLIP_YMAX                         0x00400538
+#define NV03_PGRAPH_ABS_UCLIP_XMIN                         0x0040053C
+#define NV03_PGRAPH_ABS_UCLIP_YMIN                         0x00400540
+#define NV03_PGRAPH_ABS_UCLIP_XMAX                         0x00400544
+#define NV03_PGRAPH_ABS_UCLIP_YMAX                         0x00400548
+#define NV03_PGRAPH_ABS_UCLIPA_XMIN                        0x00400560
+#define NV03_PGRAPH_ABS_UCLIPA_YMIN                        0x00400564
+#define NV03_PGRAPH_ABS_UCLIPA_XMAX                        0x00400568
+#define NV03_PGRAPH_ABS_UCLIPA_YMAX                        0x0040056C
+#define NV04_PGRAPH_MISC24_1                               0x00400570
+#define NV04_PGRAPH_MISC24_2                               0x00400574
+#define NV04_PGRAPH_VALID2                                 0x00400578
+#define NV04_PGRAPH_PASSTHRU_0                             0x0040057C
+#define NV04_PGRAPH_PASSTHRU_1                             0x00400580
+#define NV04_PGRAPH_PASSTHRU_2                             0x00400584
+#define NV10_PGRAPH_DIMX_TEXTURE                           0x00400588
+#define NV10_PGRAPH_WDIMX_TEXTURE                          0x0040058C
+#define NV04_PGRAPH_COMBINE_0_ALPHA                        0x00400590
+#define NV04_PGRAPH_COMBINE_0_COLOR                        0x00400594
+#define NV04_PGRAPH_COMBINE_1_ALPHA                        0x00400598
+#define NV04_PGRAPH_COMBINE_1_COLOR                        0x0040059C
+#define NV04_PGRAPH_FORMAT_0                               0x004005A8
+#define NV04_PGRAPH_FORMAT_1                               0x004005AC
+#define NV04_PGRAPH_FILTER_0                               0x004005B0
+#define NV04_PGRAPH_FILTER_1                               0x004005B4
+#define NV03_PGRAPH_MONO_COLOR0                            0x00400600
+#define NV04_PGRAPH_ROP3                                   0x00400604
+#define NV04_PGRAPH_BETA_AND                               0x00400608
+#define NV04_PGRAPH_BETA_PREMULT                           0x0040060C
+#define NV04_PGRAPH_LIMIT_VIOL_PIX                         0x00400610
+#define NV04_PGRAPH_FORMATS                                0x00400618
+#define NV10_PGRAPH_DEBUG_2                                0x00400620
+#define NV04_PGRAPH_BOFFSET0                               0x00400640
+#define NV04_PGRAPH_BOFFSET1                               0x00400644
+#define NV04_PGRAPH_BOFFSET2                               0x00400648
+#define NV04_PGRAPH_BOFFSET3                               0x0040064C
+#define NV04_PGRAPH_BOFFSET4                               0x00400650
+#define NV04_PGRAPH_BOFFSET5                               0x00400654
+#define NV04_PGRAPH_BBASE0                                 0x00400658
+#define NV04_PGRAPH_BBASE1                                 0x0040065C
+#define NV04_PGRAPH_BBASE2                                 0x00400660
+#define NV04_PGRAPH_BBASE3                                 0x00400664
+#define NV04_PGRAPH_BBASE4                                 0x00400668
+#define NV04_PGRAPH_BBASE5                                 0x0040066C
+#define NV04_PGRAPH_BPITCH0                                0x00400670
+#define NV04_PGRAPH_BPITCH1                                0x00400674
+#define NV04_PGRAPH_BPITCH2                                0x00400678
+#define NV04_PGRAPH_BPITCH3                                0x0040067C
+#define NV04_PGRAPH_BPITCH4                                0x00400680
+#define NV04_PGRAPH_BLIMIT0                                0x00400684
+#define NV04_PGRAPH_BLIMIT1                                0x00400688
+#define NV04_PGRAPH_BLIMIT2                                0x0040068C
+#define NV04_PGRAPH_BLIMIT3                                0x00400690
+#define NV04_PGRAPH_BLIMIT4                                0x00400694
+#define NV04_PGRAPH_BLIMIT5                                0x00400698
+#define NV04_PGRAPH_BSWIZZLE2                              0x0040069C
+#define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
+#define NV03_PGRAPH_STATUS                                 0x004006B0
+#define NV04_PGRAPH_STATUS                                 0x00400700
+#    define NV40_PGRAPH_STATUS_SYNC_STALL                  0x00004000
+#define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
+#define NV04_PGRAPH_SURFACE                                0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH                      0x0040070C
+#define NV04_PGRAPH_STATE                                  0x00400710
+#define NV10_PGRAPH_SURFACE                                0x00400710
+#define NV04_PGRAPH_NOTIFY                                 0x00400714
+#define NV10_PGRAPH_STATE                                  0x00400714
+#define NV10_PGRAPH_NOTIFY                                 0x00400718
+
+#define NV04_PGRAPH_FIFO                                   0x00400720
+
+#define NV04_PGRAPH_BPIXEL                                 0x00400724
+#define NV10_PGRAPH_RDI_INDEX                              0x00400750
+#define NV04_PGRAPH_FFINTFC_ST2                            0x00400754
+#define NV10_PGRAPH_RDI_DATA                               0x00400754
+#define NV04_PGRAPH_DMA_PITCH                              0x00400760
+#define NV10_PGRAPH_FFINTFC_FIFO_PTR                       0x00400760
+#define NV04_PGRAPH_DVD_COLORFMT                           0x00400764
+#define NV10_PGRAPH_FFINTFC_ST2                            0x00400764
+#define NV04_PGRAPH_SCALED_FORMAT                          0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DL                         0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DH                         0x0040076c
+#define NV10_PGRAPH_DMA_PITCH                              0x00400770
+#define NV10_PGRAPH_DVD_COLORFMT                           0x00400774
+#define NV10_PGRAPH_SCALED_FORMAT                          0x00400778
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE                      0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_POINTER                    0x00400784
+#define NV20_PGRAPH_CHANNEL_CTX_XFER                       0x00400788
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD                  0x00000001
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE                  0x00000002
+#define NV04_PGRAPH_PATT_COLOR0                            0x00400800
+#define NV04_PGRAPH_PATT_COLOR1                            0x00400804
+#define NV04_PGRAPH_PATTERN                                0x00400808
+#define NV04_PGRAPH_PATTERN_SHAPE                          0x00400810
+#define NV04_PGRAPH_CHROMA                                 0x00400814
+#define NV04_PGRAPH_CONTROL0                               0x00400818
+#define NV04_PGRAPH_CONTROL1                               0x0040081C
+#define NV04_PGRAPH_CONTROL2                               0x00400820
+#define NV04_PGRAPH_BLEND                                  0x00400824
+#define NV04_PGRAPH_STORED_FMT                             0x00400830
+#define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
+#define NV20_PGRAPH_TILE(i)                                (0x00400900 + (i*16))
+#define NV20_PGRAPH_TLIMIT(i)                              (0x00400904 + (i*16))
+#define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
+#define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
+#define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
+#define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
+#define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
+#define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
+#define NV04_PGRAPH_U_RAM                                  0x00400D00
+#define NV47_PGRAPH_TILE(i)                                (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT(i)                              (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE(i)                               (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS(i)                             (0x00400D0C + (i*16))
+#define NV04_PGRAPH_V_RAM                                  0x00400D40
+#define NV04_PGRAPH_W_RAM                                  0x00400D80
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB                       0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0                        0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1                        0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA                    0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA                    0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB                      0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB                      0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0                        0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1                        0x00400E6C
+#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL                  0x00400F00
+#define NV10_PGRAPH_WINDOWCLIP_VERTICAL                    0x00400F20
+#define NV10_PGRAPH_XFMODE0                                0x00400F40
+#define NV10_PGRAPH_XFMODE1                                0x00400F44
+#define NV10_PGRAPH_GLOBALSTATE0                           0x00400F48
+#define NV10_PGRAPH_GLOBALSTATE1                           0x00400F4C
+#define NV10_PGRAPH_PIPE_ADDRESS                           0x00400F50
+#define NV10_PGRAPH_PIPE_DATA                              0x00400F54
+#define NV04_PGRAPH_DMA_START_0                            0x00401000
+#define NV04_PGRAPH_DMA_START_1                            0x00401004
+#define NV04_PGRAPH_DMA_LENGTH                             0x00401008
+#define NV04_PGRAPH_DMA_MISC                               0x0040100C
+#define NV04_PGRAPH_DMA_DATA_0                             0x00401020
+#define NV04_PGRAPH_DMA_DATA_1                             0x00401024
+#define NV04_PGRAPH_DMA_RM                                 0x00401030
+#define NV04_PGRAPH_DMA_A_XLATE_INST                       0x00401040
+#define NV04_PGRAPH_DMA_A_CONTROL                          0x00401044
+#define NV04_PGRAPH_DMA_A_LIMIT                            0x00401048
+#define NV04_PGRAPH_DMA_A_TLB_PTE                          0x0040104C
+#define NV04_PGRAPH_DMA_A_TLB_TAG                          0x00401050
+#define NV04_PGRAPH_DMA_A_ADJ_OFFSET                       0x00401054
+#define NV04_PGRAPH_DMA_A_OFFSET                           0x00401058
+#define NV04_PGRAPH_DMA_A_SIZE                             0x0040105C
+#define NV04_PGRAPH_DMA_A_Y_SIZE                           0x00401060
+#define NV04_PGRAPH_DMA_B_XLATE_INST                       0x00401080
+#define NV04_PGRAPH_DMA_B_CONTROL                          0x00401084
+#define NV04_PGRAPH_DMA_B_LIMIT                            0x00401088
+#define NV04_PGRAPH_DMA_B_TLB_PTE                          0x0040108C
+#define NV04_PGRAPH_DMA_B_TLB_TAG                          0x00401090
+#define NV04_PGRAPH_DMA_B_ADJ_OFFSET                       0x00401094
+#define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
+#define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
+#define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
+#define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
new file mode 100644
index 000000000000..1f394a2629e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include <engine/fifo.h>
+#include <engine/mpeg.h>
+#include <engine/graph/nv40.h>
+
+struct nv31_mpeg_priv {
+	struct nouveau_mpeg base;
+	atomic_t refcount;
+};
+
+struct nv31_mpeg_chan {
+	struct nouveau_object base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
+
+static int
+nv31_mpeg_object_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    20, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+static int
+nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
+{
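+	/* decode the DMA object from instance memory: as used below, word 0
+	 * holds flags plus the low offset bits, word 1 the limit, and word 2
+	 * the page-aligned base */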
+	struct nouveau_instmem *imem = nouveau_instmem(object);
+	struct nv31_mpeg_priv *priv = (void *)object->engine;
+	u32 inst = *(u32 *)arg << 4;
+	u32 dma0 = nv_ro32(imem, inst + 0);
+	u32 dma1 = nv_ro32(imem, inst + 4);
+	u32 dma2 = nv_ro32(imem, inst + 8);
+	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
+	u32 size = dma1 + 1;
+
+	/* only allow linear DMA objects */
+	if (!(dma0 & 0x00002000))
+		return -EINVAL;
+
+	if (mthd == 0x0190) {
+		/* DMA_CMD */
+		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nv_wr32(priv, 0x00b334, base);
+		nv_wr32(priv, 0x00b324, size);
+	} else
+	if (mthd == 0x01a0) {
+		/* DMA_DATA */
+		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nv_wr32(priv, 0x00b360, base);
+		nv_wr32(priv, 0x00b364, size);
+	} else {
+		/* DMA_IMAGE, VRAM only */
+		if (dma0 & 0x000c0000)
+			return -EINVAL;
+
+		nv_wr32(priv, 0x00b370, base);
+		nv_wr32(priv, 0x00b374, size);
+	}
+
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv31_mpeg_ofuncs = {
+	.ctor = nv31_mpeg_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_omthds
+nv31_mpeg_omthds[] = {
+	{ 0x0190, nv31_mpeg_mthd_dma },
+	{ 0x01a0, nv31_mpeg_mthd_dma },
+	{ 0x01b0, nv31_mpeg_mthd_dma },
+	{}
+};
+
+struct nouveau_oclass
+nv31_mpeg_sclass[] = {
+	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static int
+nv31_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nv31_mpeg_chan *chan;
+	int ret;
+
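+	/* only a single channel may use PMPEG at a time on these chips */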
+	if (!atomic_add_unless(&priv->refcount, 1, 1))
+		return -EBUSY;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nv31_mpeg_context_dtor(struct nouveau_object *object)
+{
+	struct nv31_mpeg_priv *priv = (void *)object->engine;
+	struct nv31_mpeg_chan *chan = (void *)object;
+	atomic_dec(&priv->refcount);
+	nouveau_object_destroy(&chan->base);
+}
+
+static struct nouveau_oclass
+nv31_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x31),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv31_mpeg_context_ctor,
+		.dtor = nv31_mpeg_context_dtor,
+		.init = nouveau_object_init,
+		.fini = nouveau_object_fini,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+void
+nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
+{
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nv31_mpeg_priv *priv = (void *)engine;
+
+	nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
+	nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
+	nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
+}
+
+void
+nv31_mpeg_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nv31_mpeg_priv *priv = (void *)subdev;
+	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
+	u32 stat = nv_rd32(priv, 0x00b100);
+	u32 type = nv_rd32(priv, 0x00b230);
+	u32 mthd = nv_rd32(priv, 0x00b234);
+	u32 data = nv_rd32(priv, 0x00b238);
+	u32 show = stat;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x01000000) {
+		/* happens on initial binding of the object */
+		if (type == 0x00000020 && mthd == 0x0000) {
+			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			show &= ~0x01000000;
+		}
+
+		if (type == 0x00000010) {
+			handle = nouveau_handle_get_class(engctx, 0x3174);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~0x01000000;
+			nouveau_handle_put(handle);
+		}
+	}
+
+	nv_wr32(priv, 0x00b100, stat);
+	nv_wr32(priv, 0x00b230, 0x00000001);
+
+	if (show) {
+		nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 chid, inst << 4, stat, type, mthd, data);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv31_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nv31_mpeg_intr;
+	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
+	nv_engine(priv)->sclass = nv31_mpeg_sclass;
+	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+	return 0;
+}
+
+int
+nv31_mpeg_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object->engine);
+	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	int ret, i;
+
+	ret = nouveau_mpeg_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* VPE init */
+	nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+	nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	/* PMPEG init */
+	nv_wr32(priv, 0x00b32c, 0x00000000);
+	nv_wr32(priv, 0x00b314, 0x00000100);
+	nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+	nv_wr32(priv, 0x00b300, 0x02001ec1);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+
+	nv_wr32(priv, 0x00b100, 0xffffffff);
+	nv_wr32(priv, 0x00b140, 0xffffffff);
+
+	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
+		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv31_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x31),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv31_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv31_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
new file mode 100644
index 000000000000..12418574efea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include <engine/mpeg.h>
+#include <engine/graph/nv40.h>
+
+struct nv40_mpeg_priv {
+	struct nouveau_mpeg base;
+};
+
+struct nv40_mpeg_chan {
+	struct nouveau_mpeg base;
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static int
+nv40_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv40_mpeg_chan *chan;
+	int ret;
+
+	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
+					  264 * 4, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv40_mpeg_priv *priv = (void *)object->engine;
+	struct nv40_mpeg_chan *chan = (void *)object;
+	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
+
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
+	if (nv_rd32(priv, 0x00b318) == inst)
+		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv40_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = nv40_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv40_mpeg_intr(struct nouveau_subdev *subdev)
+{
+	struct nv40_mpeg_priv *priv = (void *)subdev;
+	u32 stat;
+
+	if ((stat = nv_rd32(priv, 0x00b100)))
+		nv31_mpeg_intr(subdev);
+
+	if ((stat = nv_rd32(priv, 0x00b800))) {
+		nv_error(priv, "PMSRCH 0x%08x\n", stat);
+		nv_wr32(priv, 0x00b800, stat);
+	}
+}
+
+static int
+nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv40_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nv40_mpeg_intr;
+	nv_engine(priv)->cclass = &nv40_mpeg_cclass;
+	nv_engine(priv)->sclass = nv31_mpeg_sclass;
+	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv31_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
new file mode 100644
index 000000000000..8678a9996d57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
+
+#include <engine/mpeg.h>
+
+struct nv50_mpeg_priv {
+	struct nouveau_mpeg base;
+};
+
+struct nv50_mpeg_chan {
+	struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
+
+static int
+nv50_mpeg_object_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_mpeg_ofuncs = {
+	.ctor = nv50_mpeg_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv50_mpeg_sclass[] = {
+	{ 0x3174, &nv50_mpeg_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+int
+nv50_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_mpeg_chan *chan;
+	int ret;
+
+	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
+					  0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
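+	/* seed the context image; 0x00801ec1 matches the PMPEG control value
+	 * written to 0x00b300 by nv50_mpeg_init() */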
+	nv_wo32(chan, 0x0070, 0x00801ec1);
+	nv_wo32(chan, 0x007c, 0x0000037c);
+	bar->flush(bar);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv50_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = _nouveau_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+int
+nv50_mpeg_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x08);
+	return 0;
+}
+
+void
+nv50_mpeg_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_mpeg_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, 0x00b100);
+	u32 type = nv_rd32(priv, 0x00b230);
+	u32 mthd = nv_rd32(priv, 0x00b234);
+	u32 data = nv_rd32(priv, 0x00b238);
+	u32 show = stat;
+
+	if (stat & 0x01000000) {
+		/* happens on initial binding of the object */
+		if (type == 0x00000020 && mthd == 0x0000) {
+			nv_wr32(priv, 0x00b308, 0x00000100);
+			show &= ~0x01000000;
+		}
+	}
+
+	if (show) {
+		nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+			stat, type, mthd, data);
+	}
+
+	nv_wr32(priv, 0x00b100, stat);
+	nv_wr32(priv, 0x00b230, 0x00000001);
+	nv50_fb_trap(nouveau_fb(priv), 1);
+}
+
+static void
+nv50_vpe_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_mpeg_priv *priv = (void *)subdev;
+
+	if (nv_rd32(priv, 0x00b100))
+		nv50_mpeg_intr(subdev);
+
+	if (nv_rd32(priv, 0x00b800)) {
+		u32 stat = nv_rd32(priv, 0x00b800);
+		nv_info(priv, "PMSRCH: 0x%08x\n", stat);
+		nv_wr32(priv, 0x00b800, stat);
+	}
+}
+
+static int
+nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00400002;
+	nv_subdev(priv)->intr = nv50_vpe_intr;
+	nv_engine(priv)->cclass = &nv50_mpeg_cclass;
+	nv_engine(priv)->sclass = nv50_mpeg_sclass;
+	nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
+	return 0;
+}
+
+int
+nv50_mpeg_init(struct nouveau_object *object)
+{
+	struct nv50_mpeg_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_mpeg_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x00b32c, 0x00000000);
+	nv_wr32(priv, 0x00b314, 0x00000100);
+	nv_wr32(priv, 0x00b0e0, 0x0000001a);
+
+	nv_wr32(priv, 0x00b220, 0x00000044);
+	nv_wr32(priv, 0x00b300, 0x00801ec1);
+	nv_wr32(priv, 0x00b390, 0x00000000);
+	nv_wr32(priv, 0x00b394, 0x00000000);
+	nv_wr32(priv, 0x00b398, 0x00000000);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+
+	nv_wr32(priv, 0x00b100, 0xffffffff);
+	nv_wr32(priv, 0x00b140, 0xffffffff);
+
+	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
+		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv50_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
new file mode 100644
index 000000000000..8f805b44d59e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
+
+#include <engine/mpeg.h>
+
+struct nv84_mpeg_priv {
+	struct nouveau_mpeg base;
+};
+
+struct nv84_mpeg_chan {
+	struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_mpeg_sclass[] = {
+	{ 0x8274, &nv50_mpeg_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = _nouveau_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv84_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nv50_mpeg_intr;
+	nv_engine(priv)->cclass = &nv84_mpeg_cclass;
+	nv_engine(priv)->sclass = nv84_mpeg_sclass;
+	nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv50_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
new file mode 100644
index 000000000000..50e7e0da1981
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/ppp.h>
+
+struct nv98_ppp_priv {
+	struct nouveau_ppp base;
+};
+
+struct nv98_ppp_chan {
+	struct nouveau_ppp_chan base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_ppp_sclass[] = {
+	{},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static int
+nv98_ppp_context_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nv98_ppp_chan *priv;
+	int ret;
+
+	ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
+					 0, 0, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nv98_ppp_context_dtor(struct nouveau_object *object)
+{
+	struct nv98_ppp_chan *priv = (void *)object;
+	nouveau_ppp_context_destroy(&priv->base);
+}
+
+static int
+nv98_ppp_context_init(struct nouveau_object *object)
+{
+	struct nv98_ppp_chan *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_ppp_context_init(&priv->base);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv98_ppp_chan *priv = (void *)object;
+	return nouveau_ppp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv98_ppp_cclass = {
+	.handle = NV_ENGCTX(PPP, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_ppp_context_ctor,
+		.dtor = nv98_ppp_context_dtor,
+		.init = nv98_ppp_context_init,
+		.fini = nv98_ppp_context_fini,
+		.rd32 = _nouveau_ppp_context_rd32,
+		.wr32 = _nouveau_ppp_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv98_ppp_intr(struct nouveau_subdev *subdev)
+{
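+	/* stub: no PPP interrupt handling implemented yet */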
+}
+
+static int
+nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv98_ppp_priv *priv;
+	int ret;
+
+	ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00400002;
+	nv_subdev(priv)->intr = nv98_ppp_intr;
+	nv_engine(priv)->cclass = &nv98_ppp_cclass;
+	nv_engine(priv)->sclass = nv98_ppp_sclass;
+	return 0;
+}
+
+static void
+nv98_ppp_dtor(struct nouveau_object *object)
+{
+	struct nv98_ppp_priv *priv = (void *)object;
+	nouveau_ppp_destroy(&priv->base);
+}
+
+static int
+nv98_ppp_init(struct nouveau_object *object)
+{
+	struct nv98_ppp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_ppp_init(&priv->base);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv98_ppp_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv98_ppp_priv *priv = (void *)object;
+	return nouveau_ppp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv98_ppp_oclass = {
+	.handle = NV_ENGINE(PPP, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_ppp_ctor,
+		.dtor = nv98_ppp_dtor,
+		.init = nv98_ppp_init,
+		.fini = nv98_ppp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
new file mode 100644
index 000000000000..3ca4c3aa90b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/software.h>
+#include <engine/fifo.h>
+
+struct nv04_software_priv {
+	struct nouveau_software base;
+};
+
+struct nv04_software_chan {
+	struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv04_software_set_ref(struct nouveau_object *object, u32 mthd,
+		      void *data, u32 size)
+{
+	struct nouveau_object *channel = (void *)nv_engctx(object->parent);
+	struct nouveau_fifo_chan *fifo = (void *)channel->parent;
+	atomic_set(&fifo->refcnt, *(u32*)data);
+	return 0;
+}
+
+static int
+nv04_software_flip(struct nouveau_object *object, u32 mthd,
+		   void *args, u32 size)
+{
+	struct nv04_software_chan *chan = (void *)nv_engctx(object->parent);
+	if (chan->base.flip)
+		return chan->base.flip(chan->base.flip_data);
+	return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv04_software_omthds[] = {
+	{ 0x0150, nv04_software_set_ref },
+	{ 0x0500, nv04_software_flip },
+	{}
+};
+
+static struct nouveau_oclass
+nv04_software_sclass[] = {
+	{ 0x006e, &nouveau_object_ofuncs, nv04_software_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv04_software_context_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nv04_software_chan *chan;
+	int ret;
+
+	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv04_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_software_context_ctor,
+		.dtor = _nouveau_software_context_dtor,
+		.init = _nouveau_software_context_init,
+		.fini = _nouveau_software_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+void
+nv04_software_intr(struct nouveau_subdev *subdev)
+{
+	nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
+}
+
+static int
+nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv04_software_priv *priv;
+	int ret;
+
+	ret = nouveau_software_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nv04_software_cclass;
+	nv_engine(priv)->sclass = nv04_software_sclass;
+	nv_subdev(priv)->intr = nv04_software_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_software_oclass = {
+	.handle = NV_ENGINE(SW, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_software_ctor,
+		.dtor = _nouveau_software_dtor,
+		.init = _nouveau_software_init,
+		.fini = _nouveau_software_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
new file mode 100644
index 000000000000..6e699afbfdb7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/software.h>
+
+struct nv10_software_priv {
+	struct nouveau_software base;
+};
+
+struct nv10_software_chan {
+	struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv10_software_flip(struct nouveau_object *object, u32 mthd,
+		   void *args, u32 size)
+{
+	struct nv10_software_chan *chan = (void *)nv_engctx(object->parent);
+	if (chan->base.flip)
+		return chan->base.flip(chan->base.flip_data);
+	return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv10_software_omthds[] = {
+	{ 0x0500, nv10_software_flip },
+	{}
+};
+
+static struct nouveau_oclass
+nv10_software_sclass[] = {
+	{ 0x016e, &nouveau_object_ofuncs, nv10_software_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv10_software_context_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nv10_software_chan *chan;
+	int ret;
+
+	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv10_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_software_context_ctor,
+		.dtor = _nouveau_software_context_dtor,
+		.init = _nouveau_software_context_init,
+		.fini = _nouveau_software_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv10_software_priv *priv;
+	int ret;
+
+	ret = nouveau_software_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nv10_software_cclass;
+	nv_engine(priv)->sclass = nv10_software_sclass;
+	nv_subdev(priv)->intr = nv04_software_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv10_software_oclass = {
+	.handle = NV_ENGINE(SW, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_software_ctor,
+		.dtor = _nouveau_software_dtor,
+		.init = _nouveau_software_init,
+		.fini = _nouveau_software_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
new file mode 100644
index 000000000000..a2edcd38544a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nv50_software_priv {
+	struct nouveau_software base;
+};
+
+struct nv50_software_chan {
+	struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
+	struct nouveau_handle *handle;
+	int ret = -EINVAL;
+
+	handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
+	if (!handle)
+		return -ENOENT;
+
+	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+		struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
+		chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
+		ret = 0;
+	}
+	nouveau_namedb_put(handle);
+	return ret;
+}
+
+static int
+nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
+				 void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	chan->base.vblank.offset = *(u32 *)args;
+	return 0;
+}
+
+static int
+nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
+				void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	chan->base.vblank.value = *(u32 *)args;
+	return 0;
+}
+
+static int
+nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
+				  void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nouveau_disp *disp = nouveau_disp(object);
+	unsigned long flags;
+	u32 crtc = *(u32 *)args;
+
+	if (crtc > 1)
+		return -EINVAL;
+
+	disp->vblank.get(disp->vblank.data, crtc);
+
+	spin_lock_irqsave(&disp->vblank.lock, flags);
+	list_add(&chan->base.vblank.head, &disp->vblank.list);
+	chan->base.vblank.crtc = crtc;
+	spin_unlock_irqrestore(&disp->vblank.lock, flags);
+	return 0;
+}
+
+static int
+nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
+			void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	if (chan->base.flip)
+		return chan->base.flip(chan->base.flip_data);
+	return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv50_software_omthds[] = {
+	{ 0x018c, nv50_software_mthd_dma_vblsem },
+	{ 0x0400, nv50_software_mthd_vblsem_offset },
+	{ 0x0404, nv50_software_mthd_vblsem_value },
+	{ 0x0408, nv50_software_mthd_vblsem_release },
+	{ 0x0500, nv50_software_mthd_flip },
+	{}
+};
+
+static struct nouveau_oclass
+nv50_software_sclass[] = {
+	{ 0x506e, &nouveau_object_ofuncs, nv50_software_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv50_software_context_ctor(struct nouveau_object *parent,
+			   struct nouveau_object *engine,
+			   struct nouveau_oclass *oclass, void *data, u32 size,
+			   struct nouveau_object **pobject)
+{
+	struct nv50_software_chan *chan;
+	int ret;
+
+	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+	return 0;
+}
+
+static struct nouveau_oclass
+nv50_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_software_context_ctor,
+		.dtor = _nouveau_software_context_dtor,
+		.init = _nouveau_software_context_init,
+		.fini = _nouveau_software_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_software_priv *priv;
+	int ret;
+
+	ret = nouveau_software_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nv50_software_cclass;
+	nv_engine(priv)->sclass = nv50_software_sclass;
+	nv_subdev(priv)->intr = nv04_software_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_software_oclass = {
+	.handle = NV_ENGINE(SW, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_software_ctor,
+		.dtor = _nouveau_software_dtor,
+		.init = _nouveau_software_init,
+		.fini = _nouveau_software_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
new file mode 100644
index 000000000000..b7b0d7e330d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nvc0_software_priv {
+	struct nouveau_software base;
+};
+
+struct nvc0_software_chan {
+	struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
+				 void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	u64 data = *(u32 *)args;
+	if (mthd == 0x0400) {
+		chan->base.vblank.offset &= 0x00ffffffffULL;
+		chan->base.vblank.offset |= data << 32;
+	} else {
+		chan->base.vblank.offset &= 0xff00000000ULL;
+		chan->base.vblank.offset |= data;
+	}
+	return 0;
+}
+
+static int
+nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
+				void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	chan->base.vblank.value = *(u32 *)args;
+	return 0;
+}
+
+static int
+nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
+				  void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nouveau_disp *disp = nouveau_disp(object);
+	unsigned long flags;
+	u32 crtc = *(u32 *)args;
+
+	if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
+		return -EINVAL;
+
+	disp->vblank.get(disp->vblank.data, crtc);
+
+	spin_lock_irqsave(&disp->vblank.lock, flags);
+	list_add(&chan->base.vblank.head, &disp->vblank.list);
+	chan->base.vblank.crtc = crtc;
+	spin_unlock_irqrestore(&disp->vblank.lock, flags);
+	return 0;
+}
+
+static int
+nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
+			void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	if (chan->base.flip)
+		return chan->base.flip(chan->base.flip_data);
+	return -EINVAL;
+}
+
+static struct nouveau_omthds
+nvc0_software_omthds[] = {
+	{ 0x0400, nvc0_software_mthd_vblsem_offset },
+	{ 0x0404, nvc0_software_mthd_vblsem_offset },
+	{ 0x0408, nvc0_software_mthd_vblsem_value },
+	{ 0x040c, nvc0_software_mthd_vblsem_release },
+	{ 0x0500, nvc0_software_mthd_flip },
+	{}
+};
+
+static struct nouveau_oclass
+nvc0_software_sclass[] = {
+	{ 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nvc0_software_context_ctor(struct nouveau_object *parent,
+			   struct nouveau_object *engine,
+			   struct nouveau_oclass *oclass, void *data, u32 size,
+			   struct nouveau_object **pobject)
+{
+	struct nvc0_software_chan *chan;
+	int ret;
+
+	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+	return 0;
+}
+
+static struct nouveau_oclass
+nvc0_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_software_context_ctor,
+		.dtor = _nouveau_software_context_dtor,
+		.init = _nouveau_software_context_init,
+		.fini = _nouveau_software_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 size,
+		   struct nouveau_object **pobject)
+{
+	struct nvc0_software_priv *priv;
+	int ret;
+
+	ret = nouveau_software_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nvc0_software_cclass;
+	nv_engine(priv)->sclass = nvc0_software_sclass;
+	nv_subdev(priv)->intr = nv04_software_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_software_oclass = {
+	.handle = NV_ENGINE(SW, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_software_ctor,
+		.dtor = _nouveau_software_dtor,
+		.init = _nouveau_software_init,
+		.fini = _nouveau_software_fini,
+	},
+};
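
The NVC0 SW class programs the vblank semaphore address with two 32-bit method writes: 0x0400 carries the upper part and 0x0404 the lower 32 bits, which nvc0_software_mthd_vblsem_offset() masks together into chan->base.vblank.offset. Below is a minimal standalone sketch of that masking (userspace C, illustrative names, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint64_t vblsem_offset;

static void mthd_vblsem_offset(uint32_t mthd, uint32_t data)
{
	if (mthd == 0x0400) {
		/* upper part, as in the kernel handler */
		vblsem_offset &= 0x00ffffffffULL;
		vblsem_offset |= (uint64_t)data << 32;
	} else {
		/* lower 32 bits */
		vblsem_offset &= 0xff00000000ULL;
		vblsem_offset |= data;
	}
}

int main(void)
{
	mthd_vblsem_offset(0x0400, 0x12);
	mthd_vblsem_offset(0x0404, 0x3456789a);
	printf("0x%010llx\n", (unsigned long long)vblsem_offset);	/* 0x123456789a */
	return 0;
}

Note that writing the low word last masks the upper part with 0xff00000000, so the assembled address ends up at most 40 bits wide.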
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
new file mode 100644
index 000000000000..dd23c80e5405
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/vp.h>
+
+struct nv84_vp_priv {
+	struct nouveau_vp base;
+};
+
+struct nv84_vp_chan {
+	struct nouveau_vp_chan base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_vp_sclass[] = {
+	{},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static int
+nv84_vp_context_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nv84_vp_chan *priv;
+	int ret;
+
+	ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
+					0, 0, 0, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nv84_vp_context_dtor(struct nouveau_object *object)
+{
+	struct nv84_vp_chan *priv = (void *)object;
+	nouveau_vp_context_destroy(&priv->base);
+}
+
+static int
+nv84_vp_context_init(struct nouveau_object *object)
+{
+	struct nv84_vp_chan *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_vp_context_init(&priv->base);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv84_vp_chan *priv = (void *)object;
+	return nouveau_vp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv84_vp_cclass = {
+	.handle = NV_ENGCTX(VP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_vp_context_ctor,
+		.dtor = nv84_vp_context_dtor,
+		.init = nv84_vp_context_init,
+		.fini = nv84_vp_context_fini,
+		.rd32 = _nouveau_vp_context_rd32,
+		.wr32 = _nouveau_vp_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv84_vp_intr(struct nouveau_subdev *subdev)
+{
+}
+
+static int
+nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv84_vp_priv *priv;
+	int ret;
+
+	ret = nouveau_vp_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x01020000;
+	nv_subdev(priv)->intr = nv84_vp_intr;
+	nv_engine(priv)->cclass = &nv84_vp_cclass;
+	nv_engine(priv)->sclass = nv84_vp_sclass;
+	return 0;
+}
+
+static void
+nv84_vp_dtor(struct nouveau_object *object)
+{
+	struct nv84_vp_priv *priv = (void *)object;
+	nouveau_vp_destroy(&priv->base);
+}
+
+static int
+nv84_vp_init(struct nouveau_object *object)
+{
+	struct nv84_vp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_vp_init(&priv->base);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv84_vp_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv84_vp_priv *priv = (void *)object;
+	return nouveau_vp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv84_vp_oclass = {
+	.handle = NV_ENGINE(VP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_vp_ctor,
+		.dtor = nv84_vp_dtor,
+		.init = nv84_vp_init,
+		.fini = nv84_vp_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
new file mode 100644
index 000000000000..6180ae9800fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -0,0 +1,118 @@
+#ifndef __NOUVEAU_CLASS_H__
+#define __NOUVEAU_CLASS_H__
+
+/* Device class
+ *
+ * 0080: NV_DEVICE
+ */
+#define NV_DEVICE_CLASS                                              0x00000080
+
+#define NV_DEVICE_DISABLE_IDENTIFY                        0x0000000000000001ULL
+#define NV_DEVICE_DISABLE_MMIO                            0x0000000000000002ULL
+#define NV_DEVICE_DISABLE_VBIOS                           0x0000000000000004ULL
+#define NV_DEVICE_DISABLE_CORE                            0x0000000000000008ULL
+#define NV_DEVICE_DISABLE_DISP                            0x0000000000010000ULL
+#define NV_DEVICE_DISABLE_FIFO                            0x0000000000020000ULL
+#define NV_DEVICE_DISABLE_GRAPH                           0x0000000100000000ULL
+#define NV_DEVICE_DISABLE_MPEG                            0x0000000200000000ULL
+#define NV_DEVICE_DISABLE_ME                              0x0000000400000000ULL
+#define NV_DEVICE_DISABLE_VP                              0x0000000800000000ULL
+#define NV_DEVICE_DISABLE_CRYPT                           0x0000001000000000ULL
+#define NV_DEVICE_DISABLE_BSP                             0x0000002000000000ULL
+#define NV_DEVICE_DISABLE_PPP                             0x0000004000000000ULL
+#define NV_DEVICE_DISABLE_COPY0                           0x0000008000000000ULL
+#define NV_DEVICE_DISABLE_COPY1                           0x0000010000000000ULL
+#define NV_DEVICE_DISABLE_UNK1C1                          0x0000020000000000ULL
+
+struct nv_device_class {
+	u64 device;	/* device identifier, ~0 for client default */
+	u64 disable;	/* disable particular subsystems */
+	u64 debug0;	/* as above, but *internal* ids, and *NOT* ABI */
+};
+
+/* DMA object classes
+ *
+ * 0002: NV_DMA_FROM_MEMORY
+ * 0003: NV_DMA_TO_MEMORY
+ * 003d: NV_DMA_IN_MEMORY
+ */
+#define NV_DMA_FROM_MEMORY_CLASS                                     0x00000002
+#define NV_DMA_TO_MEMORY_CLASS                                       0x00000003
+#define NV_DMA_IN_MEMORY_CLASS                                       0x0000003d
+
+#define NV_DMA_TARGET_MASK                                           0x000000ff
+#define NV_DMA_TARGET_VM                                             0x00000000
+#define NV_DMA_TARGET_VRAM                                           0x00000001
+#define NV_DMA_TARGET_PCI                                            0x00000002
+#define NV_DMA_TARGET_PCI_US                                         0x00000003
+#define NV_DMA_TARGET_AGP                                            0x00000004
+#define NV_DMA_ACCESS_MASK                                           0x00000f00
+#define NV_DMA_ACCESS_VM                                             0x00000000
+#define NV_DMA_ACCESS_RD                                             0x00000100
+#define NV_DMA_ACCESS_WR                                             0x00000200
+#define NV_DMA_ACCESS_RDWR                                           0x00000300
+
+struct nv_dma_class {
+	u32 flags;
+	u32 pad0;
+	u64 start;
+	u64 limit;
+};
+
+/* DMA FIFO channel classes
+ *
+ * 006b: NV03_CHANNEL_DMA
+ * 006e: NV10_CHANNEL_DMA
+ * 176e: NV17_CHANNEL_DMA
+ * 406e: NV40_CHANNEL_DMA
+ * 506e: NV50_CHANNEL_DMA
+ * 826e: NV84_CHANNEL_DMA
+ */
+#define NV03_CHANNEL_DMA_CLASS                                       0x0000006b
+#define NV10_CHANNEL_DMA_CLASS                                       0x0000006e
+#define NV17_CHANNEL_DMA_CLASS                                       0x0000176e
+#define NV40_CHANNEL_DMA_CLASS                                       0x0000406e
+#define NV50_CHANNEL_DMA_CLASS                                       0x0000506e
+#define NV84_CHANNEL_DMA_CLASS                                       0x0000826e
+
+struct nv03_channel_dma_class {
+	u32 pushbuf;
+	u32 pad0;
+	u64 offset;
+};
+
+/* Indirect FIFO channel classes
+ *
+ * 506f: NV50_CHANNEL_IND
+ * 826f: NV84_CHANNEL_IND
+ * 906f: NVC0_CHANNEL_IND
+ * a06f: NVE0_CHANNEL_IND
+ */
+
+#define NV50_CHANNEL_IND_CLASS                                       0x0000506f
+#define NV84_CHANNEL_IND_CLASS                                       0x0000826f
+#define NVC0_CHANNEL_IND_CLASS                                       0x0000906f
+#define NVE0_CHANNEL_IND_CLASS                                       0x0000a06f
+
+struct nv50_channel_ind_class {
+	u32 pushbuf;
+	u32 ilength;
+	u64 ioffset;
+};
+
+#define NVE0_CHANNEL_IND_ENGINE_GR                                   0x00000001
+#define NVE0_CHANNEL_IND_ENGINE_VP                                   0x00000002
+#define NVE0_CHANNEL_IND_ENGINE_PPP                                  0x00000004
+#define NVE0_CHANNEL_IND_ENGINE_BSP                                  0x00000008
+#define NVE0_CHANNEL_IND_ENGINE_CE0                                  0x00000010
+#define NVE0_CHANNEL_IND_ENGINE_CE1                                  0x00000020
+#define NVE0_CHANNEL_IND_ENGINE_ENC                                  0x00000040
+
+struct nve0_channel_ind_class {
+	u32 pushbuf;
+	u32 ilength;
+	u64 ioffset;
+	u32 engine;
+};
+
+#endif
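
The structures in class.h describe the argument blocks handed to object constructors (the void *data / u32 size pair seen throughout the engine code). As a standalone sketch of composing the NV_DEVICE_CLASS arguments -- the u32/u64 typedefs are stand-ins so the snippet compiles outside the kernel, and the chosen disable mask is only an example:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define NV_DEVICE_DISABLE_DISP   0x0000000000010000ULL
#define NV_DEVICE_DISABLE_GRAPH  0x0000000100000000ULL

struct nv_device_class {
	u64 device;	/* device identifier, ~0 for client default */
	u64 disable;	/* disable particular subsystems */
	u64 debug0;	/* internal ids, not ABI */
};

int main(void)
{
	struct nv_device_class args = {
		.device  = ~0ULL,	/* client's default device */
		.disable = NV_DEVICE_DISABLE_DISP | NV_DEVICE_DISABLE_GRAPH,
		.debug0  = 0,
	};

	printf("disable mask: 0x%016llx\n", (unsigned long long)args.disable);
	return 0;
}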
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
new file mode 100644
index 000000000000..0193532ceac9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -0,0 +1,42 @@
+#ifndef __NOUVEAU_CLIENT_H__
+#define __NOUVEAU_CLIENT_H__
+
+#include <core/namedb.h>
+
+struct nouveau_client {
+	struct nouveau_namedb base;
+	struct nouveau_handle *root;
+	struct nouveau_object *device;
+	char name[16];
+	u32 debug;
+	struct nouveau_vm *vm;
+};
+
+static inline struct nouveau_client *
+nv_client(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
+		nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+static inline struct nouveau_client *
+nouveau_client(void *obj)
+{
+	struct nouveau_object *client = nv_object(obj);
+	while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
+		client = client->parent;
+	return (void *)client;
+}
+
+#define nouveau_client_create(n,c,oc,od,d)                                     \
+	nouveau_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
+
+int  nouveau_client_create_(const char *name, u64 device, const char *cfg,
+			    const char *dbg, int, void **);
+int  nouveau_client_init(struct nouveau_client *);
+int  nouveau_client_fini(struct nouveau_client *, bool suspend);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h
new file mode 100644
index 000000000000..9ea18dfcb4d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -0,0 +1,13 @@
+#ifndef __NOUVEAU_DEBUG_H__
+#define __NOUVEAU_DEBUG_H__
+
+#define NV_DBG_FATAL    0
+#define NV_DBG_ERROR    1
+#define NV_DBG_WARN     2
+#define NV_DBG_INFO     3
+#define NV_DBG_DEBUG    4
+#define NV_DBG_TRACE    5
+#define NV_DBG_PARANOIA 6
+#define NV_DBG_SPAM     7
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
new file mode 100644
index 000000000000..e58b6f0984c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -0,0 +1,136 @@
+#ifndef __NOUVEAU_DEVICE_H__
+#define __NOUVEAU_DEVICE_H__
+
+#include <core/object.h>
+#include <core/subdev.h>
+#include <core/engine.h>
+
+enum nv_subdev_type {
+	NVDEV_SUBDEV_DEVICE,
+	NVDEV_SUBDEV_VBIOS,
+
+	/* All subdevs from DEVINIT to DEVINIT_LAST will be created before
+	 * *any* of them are initialised.  This subdev category is used
+	 * for any subdevs that the VBIOS init table parsing may call out
+	 * to during POST.
+	 */
+	NVDEV_SUBDEV_DEVINIT,
+	NVDEV_SUBDEV_GPIO,
+	NVDEV_SUBDEV_I2C,
+	NVDEV_SUBDEV_CLOCK,
+	NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_CLOCK,
+
+	/* Subdevs in this group are initialised right after they've
+	 * been created, and are allowed to assume any subdevs in the
+	 * list above them exist and have been initialised.
+	 */
+	NVDEV_SUBDEV_MXM,
+	NVDEV_SUBDEV_MC,
+	NVDEV_SUBDEV_TIMER,
+	NVDEV_SUBDEV_FB,
+	NVDEV_SUBDEV_LTCG,
+	NVDEV_SUBDEV_IBUS,
+	NVDEV_SUBDEV_INSTMEM,
+	NVDEV_SUBDEV_VM,
+	NVDEV_SUBDEV_BAR,
+	NVDEV_SUBDEV_VOLT,
+	NVDEV_SUBDEV_THERM,
+
+	NVDEV_ENGINE_DMAOBJ,
+	NVDEV_ENGINE_FIFO,
+	NVDEV_ENGINE_SW,
+	NVDEV_ENGINE_GR,
+	NVDEV_ENGINE_MPEG,
+	NVDEV_ENGINE_ME,
+	NVDEV_ENGINE_VP,
+	NVDEV_ENGINE_CRYPT,
+	NVDEV_ENGINE_BSP,
+	NVDEV_ENGINE_PPP,
+	NVDEV_ENGINE_COPY0,
+	NVDEV_ENGINE_COPY1,
+	NVDEV_ENGINE_UNK1C1,
+	NVDEV_ENGINE_VENC,
+	NVDEV_ENGINE_DISP,
+
+	NVDEV_SUBDEV_NR,
+};
+
+struct nouveau_device {
+	struct nouveau_subdev base;
+	struct list_head head;
+
+	struct pci_dev *pdev;
+	u64 handle;
+
+	const char *cfgopt;
+	const char *dbgopt;
+	const char *name;
+	const char *cname;
+
+	enum {
+		NV_04    = 0x04,
+		NV_10    = 0x10,
+		NV_20    = 0x20,
+		NV_30    = 0x30,
+		NV_40    = 0x40,
+		NV_50    = 0x50,
+		NV_C0    = 0xc0,
+		NV_D0    = 0xd0,
+		NV_E0    = 0xe0,
+	} card_type;
+	u32 chipset;
+	u32 crystal;
+
+	struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
+	struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+};
+
+static inline struct nouveau_device *
+nv_device(void *obj)
+{
+	struct nouveau_object *object = nv_object(obj);
+	struct nouveau_object *device = object;
+
+	if (device->engine)
+		device = device->engine;
+	if (device->parent)
+		device = device->parent;
+
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
+		     (nv_hclass(device) & 0xff) != NVDEV_SUBDEV_DEVICE)) {
+		nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
+			  nv_hclass(object), nv_hclass(device));
+	}
+#endif
+
+	return (void *)device;
+}
+
+static inline struct nouveau_subdev *
+nouveau_subdev(void *obj, int sub)
+{
+	if (nv_device(obj)->subdev[sub])
+		return nv_subdev(nv_device(obj)->subdev[sub]);
+	return NULL;
+}
+
+static inline struct nouveau_engine *
+nouveau_engine(void *obj, int sub)
+{
+	struct nouveau_subdev *subdev = nouveau_subdev(obj, sub);
+	if (subdev && nv_iclass(subdev, NV_ENGINE_CLASS))
+		return nv_engine(subdev);
+	return NULL;
+}
+
+static inline bool
+nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub)
+{
+	struct nouveau_device *device = nv_device(object);
+	return device->pdev->device == dev &&
+	       device->pdev->subsystem_vendor == ven &&
+	       device->pdev->subsystem_device == sub;
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
new file mode 100644
index 000000000000..8a947b6872eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -0,0 +1,51 @@
+#ifndef __NOUVEAU_ENGCTX_H__
+#define __NOUVEAU_ENGCTX_H__
+
+#include <core/object.h>
+#include <core/gpuobj.h>
+
+#include <subdev/vm.h>
+
+#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
+#define NV_ENGCTX(name,var)  NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
+
+struct nouveau_engctx {
+	struct nouveau_gpuobj base;
+	struct nouveau_vma vma;
+	struct list_head head;
+	unsigned long save;
+	u64 addr;
+};
+
+static inline struct nouveau_engctx *
+nv_engctx(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
+		nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_engctx_create(p,e,c,g,s,a,f,d)                                 \
+	nouveau_engctx_create_((p), (e), (c), (g), (s), (a), (f),              \
+			       sizeof(**d), (void **)d)
+
+int  nouveau_engctx_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, struct nouveau_object *,
+			    u32 size, u32 align, u32 flags,
+			    int length, void **data);
+void nouveau_engctx_destroy(struct nouveau_engctx *);
+int  nouveau_engctx_init(struct nouveau_engctx *);
+int  nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
+
+void _nouveau_engctx_dtor(struct nouveau_object *);
+int  _nouveau_engctx_init(struct nouveau_object *);
+int  _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
+#define _nouveau_engctx_rd32 _nouveau_gpuobj_rd32
+#define _nouveau_engctx_wr32 _nouveau_gpuobj_wr32
+
+struct nouveau_object *nouveau_engctx_get(struct nouveau_engine *, u64 addr);
+void nouveau_engctx_put(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engine.h b/drivers/gpu/drm/nouveau/core/include/core/engine.h
new file mode 100644
index 000000000000..666d06de77ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/engine.h
@@ -0,0 +1,57 @@
+#ifndef __NOUVEAU_ENGINE_H__
+#define __NOUVEAU_ENGINE_H__
+
+#include <core/object.h>
+#include <core/subdev.h>
+
+#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
+#define NV_ENGINE(name,var)  NV_ENGINE_(NVDEV_ENGINE_##name, (var))
+
+struct nouveau_engine {
+	struct nouveau_subdev base;
+	struct nouveau_oclass *cclass;
+	struct nouveau_oclass *sclass;
+
+	struct list_head contexts;
+	spinlock_t lock;
+
+	void (*tile_prog)(struct nouveau_engine *, int region);
+	int  (*tlb_flush)(struct nouveau_engine *);
+};
+
+static inline struct nouveau_engine *
+nv_engine(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
+		nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+static inline int
+nv_engidx(struct nouveau_object *object)
+{
+	return nv_subidx(object);
+}
+
+#define nouveau_engine_create(p,e,c,d,i,f,r)                                   \
+	nouveau_engine_create_((p), (e), (c), (d), (i), (f),                   \
+			       sizeof(**r),(void **)r)
+
+#define nouveau_engine_destroy(p)                                              \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_engine_init(p)                                                 \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_engine_fini(p,s)                                               \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_engine_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, bool, const char *,
+			   const char *, int, void **);
+
+#define _nouveau_engine_dtor _nouveau_subdev_dtor
+#define _nouveau_engine_init _nouveau_subdev_init
+#define _nouveau_engine_fini _nouveau_subdev_fini
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h
new file mode 100644
index 000000000000..e7b1e181943b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/enum.h
@@ -0,0 +1,23 @@
+#ifndef __NOUVEAU_ENUM_H__
+#define __NOUVEAU_ENUM_H__
+
+struct nouveau_enum {
+	u32 value;
+	const char *name;
+	const void *data;
+};
+
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *, u32 value);
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value);
+
+struct nouveau_bitfield {
+	u32 mask;
+	const char *name;
+};
+
+void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
+
+#endif
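
enum.h only declares the value/name lookup helpers; the tables they operate on pair a register value or bitmask with a printable name. A rough userspace illustration of how such a table is scanned follows (the demo_* names and entries are invented for the example; this is not the driver's actual nouveau_enum_find() implementation):

#include <stdint.h>
#include <stdio.h>

struct demo_enum {
	uint32_t value;
	const char *name;
};

static const struct demo_enum demo_intr[] = {
	{ 0x01, "DISPATCH_ERROR" },
	{ 0x02, "INVALID_METHOD" },
	{ 0x00, NULL }	/* terminator */
};

static const char *demo_enum_find(const struct demo_enum *en, uint32_t value)
{
	for (; en->name; en++) {
		if (en->value == value)
			return en->name;
	}
	return NULL;
}

int main(void)
{
	const char *name = demo_enum_find(demo_intr, 0x02);
	printf("%s\n", name ? name : "UNKNOWN");	/* INVALID_METHOD */
	return 0;
}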
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
new file mode 100644
index 000000000000..6eaff79377ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -0,0 +1,71 @@
+#ifndef __NOUVEAU_GPUOBJ_H__
+#define __NOUVEAU_GPUOBJ_H__
+
+#include <core/object.h>
+#include <core/device.h>
+#include <core/parent.h>
+#include <core/mm.h>
+
+struct nouveau_vma;
+struct nouveau_vm;
+
+#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
+#define NVOBJ_FLAG_ZERO_FREE  0x00000002
+#define NVOBJ_FLAG_HEAP       0x00000004
+
+struct nouveau_gpuobj {
+	struct nouveau_object base;
+	struct nouveau_object *parent;
+	struct nouveau_mm_node *node;
+	struct nouveau_mm heap;
+
+	u32 flags;
+	u64 addr;
+	u32 size;
+};
+
+static inline struct nouveau_gpuobj *
+nv_gpuobj(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
+		nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_gpuobj_create(p,e,c,v,g,s,a,f,d)                               \
+	nouveau_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f),         \
+			       sizeof(**d), (void **)d)
+#define nouveau_gpuobj_init(p) nouveau_object_init(&(p)->base)
+#define nouveau_gpuobj_fini(p,s) nouveau_object_fini(&(p)->base, (s))
+int  nouveau_gpuobj_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32 pclass,
+			    struct nouveau_object *, u32 size, u32 align,
+			    u32 flags, int length, void **);
+void nouveau_gpuobj_destroy(struct nouveau_gpuobj *);
+
+int nouveau_gpuobj_new(struct nouveau_object *, struct nouveau_object *,
+		       u32 size, u32 align, u32 flags,
+		       struct nouveau_gpuobj **);
+int nouveau_gpuobj_dup(struct nouveau_object *, struct nouveau_gpuobj *,
+		       struct nouveau_gpuobj **);
+
+int nouveau_gpuobj_map(struct nouveau_gpuobj *, u32 acc, struct nouveau_vma *);
+int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *, struct nouveau_vm *,
+			  u32 access, struct nouveau_vma *);
+void nouveau_gpuobj_unmap(struct nouveau_vma *);
+
+static inline void
+nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
+{
+	nouveau_object_ref(&obj->base, (struct nouveau_object **)ref);
+}
+
+void _nouveau_gpuobj_dtor(struct nouveau_object *);
+int  _nouveau_gpuobj_init(struct nouveau_object *);
+int  _nouveau_gpuobj_fini(struct nouveau_object *, bool);
+u32  _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
new file mode 100644
index 000000000000..363674cdf8ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -0,0 +1,31 @@
+#ifndef __NOUVEAU_HANDLE_H__
+#define __NOUVEAU_HANDLE_H__
+
+struct nouveau_handle {
+	struct nouveau_namedb *namedb;
+	struct list_head node;
+
+	struct list_head head;
+	struct list_head tree;
+	u32 name;
+	u32 priv;
+
+	struct nouveau_handle *parent;
+	struct nouveau_object *object;
+};
+
+int  nouveau_handle_create(struct nouveau_object *, u32 parent, u32 handle,
+			   struct nouveau_object *, struct nouveau_handle **);
+void nouveau_handle_destroy(struct nouveau_handle *);
+int  nouveau_handle_init(struct nouveau_handle *);
+int  nouveau_handle_fini(struct nouveau_handle *, bool suspend);
+
+struct nouveau_object *
+nouveau_handle_ref(struct nouveau_object *, u32 name);
+
+struct nouveau_handle *nouveau_handle_get_class(struct nouveau_object *, u16);
+struct nouveau_handle *nouveau_handle_get_vinst(struct nouveau_object *, u64);
+struct nouveau_handle *nouveau_handle_get_cinst(struct nouveau_object *, u32);
+void nouveau_handle_put(struct nouveau_handle *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/math.h b/drivers/gpu/drm/nouveau/core/include/core/math.h
new file mode 100644
index 000000000000..f808131c5cd8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/math.h
@@ -0,0 +1,16 @@
+#ifndef __NOUVEAU_MATH_H__
+#define __NOUVEAU_MATH_H__
+
+static inline int
+log2i(u64 base)
+{
+	u64 temp = base >> 1;
+	int log2;
+
+	for (log2 = 0; temp; log2++, temp >>= 1) {
+	}
+
+	return (base & (base - 1)) ? log2 + 1: log2;
+}
+
+#endif
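
log2i() returns the ceiling of log2 for a non-zero input: the loop computes floor(log2(base)), and the (base & (base - 1)) test adds one when base is not a power of two. A quick standalone check of a copied version (userspace only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static int log2i(uint64_t base)
{
	uint64_t temp = base >> 1;
	int log2;

	/* count how many times base can be halved: floor(log2(base)) */
	for (log2 = 0; temp; log2++, temp >>= 1)
		;

	/* round up when base is not a power of two */
	return (base & (base - 1)) ? log2 + 1 : log2;
}

int main(void)
{
	printf("%d %d %d %d %d\n",
	       log2i(1), log2i(2), log2i(3), log2i(8), log2i(9));
	/* prints: 0 1 2 3 4 */
	return 0;
}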
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
new file mode 100644
index 000000000000..9ee9bf4028ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -0,0 +1,33 @@
+#ifndef __NOUVEAU_MM_H__
+#define __NOUVEAU_MM_H__
+
+struct nouveau_mm_node {
+	struct list_head nl_entry;
+	struct list_head fl_entry;
+	struct list_head rl_entry;
+
+	u8  type;
+	u32 offset;
+	u32 length;
+};
+
+struct nouveau_mm {
+	struct list_head nodes;
+	struct list_head free;
+
+	struct mutex mutex;
+
+	u32 block_size;
+	int heap_nodes;
+	u32 heap_size;
+};
+
+int  nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+int  nouveau_mm_fini(struct nouveau_mm *);
+int  nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
+		     u32 align, struct nouveau_mm_node **);
+int  nouveau_mm_tail(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
+		     u32 align, struct nouveau_mm_node **);
+void nouveau_mm_free(struct nouveau_mm *, struct nouveau_mm_node **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/namedb.h b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
new file mode 100644
index 000000000000..8897e0886085
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
@@ -0,0 +1,56 @@
+#ifndef __NOUVEAU_NAMEDB_H__
+#define __NOUVEAU_NAMEDB_H__
+
+#include <core/parent.h>
+
+struct nouveau_handle;
+
+struct nouveau_namedb {
+	struct nouveau_parent base;
+	rwlock_t lock;
+	struct list_head list;
+};
+
+static inline struct nouveau_namedb *
+nv_namedb(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
+		nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_namedb_create(p,e,c,v,s,m,d)                                   \
+	nouveau_namedb_create_((p), (e), (c), (v), (s), (m),                   \
+			       sizeof(**d), (void **)d)
+#define nouveau_namedb_init(p)                                                 \
+	nouveau_parent_init(&(p)->base)
+#define nouveau_namedb_fini(p,s)                                               \
+	nouveau_parent_fini(&(p)->base, (s))
+#define nouveau_namedb_destroy(p)                                              \
+	nouveau_parent_destroy(&(p)->base)
+
+int  nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32 pclass,
+			    struct nouveau_oclass *, u32 engcls,
+			    int size, void **);
+
+int  _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, void *, u32,
+			  struct nouveau_object **);
+#define _nouveau_namedb_dtor _nouveau_parent_dtor
+#define _nouveau_namedb_init _nouveau_parent_init
+#define _nouveau_namedb_fini _nouveau_parent_fini
+
+int  nouveau_namedb_insert(struct nouveau_namedb *, u32 name,
+			   struct nouveau_object *, struct nouveau_handle *);
+void nouveau_namedb_remove(struct nouveau_handle *);
+
+struct nouveau_handle *nouveau_namedb_get(struct nouveau_namedb *, u32);
+struct nouveau_handle *nouveau_namedb_get_class(struct nouveau_namedb *, u16);
+struct nouveau_handle *nouveau_namedb_get_vinst(struct nouveau_namedb *, u64);
+struct nouveau_handle *nouveau_namedb_get_cinst(struct nouveau_namedb *, u32);
+void nouveau_namedb_put(struct nouveau_handle *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
new file mode 100644
index 000000000000..818feabbf4a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -0,0 +1,188 @@
+#ifndef __NOUVEAU_OBJECT_H__
+#define __NOUVEAU_OBJECT_H__
+
+#include <core/os.h>
+#include <core/printk.h>
+
+#define NV_PARENT_CLASS 0x80000000
+#define NV_NAMEDB_CLASS 0x40000000
+#define NV_CLIENT_CLASS 0x20000000
+#define NV_SUBDEV_CLASS 0x10000000
+#define NV_ENGINE_CLASS 0x08000000
+#define NV_MEMOBJ_CLASS 0x04000000
+#define NV_GPUOBJ_CLASS 0x02000000
+#define NV_ENGCTX_CLASS 0x01000000
+#define NV_OBJECT_CLASS 0x0000ffff
+
+struct nouveau_object {
+	struct nouveau_oclass *oclass;
+	struct nouveau_object *parent;
+	struct nouveau_object *engine;
+	atomic_t refcount;
+	atomic_t usecount;
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+#define NOUVEAU_OBJECT_MAGIC 0x75ef0bad
+	struct list_head list;
+	u32 _magic;
+#endif
+};
+
+static inline struct nouveau_object *
+nv_object(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (likely(obj)) {
+		struct nouveau_object *object = obj;
+		if (unlikely(object->_magic != NOUVEAU_OBJECT_MAGIC))
+			nv_assert("BAD CAST -> NvObject, invalid magic");
+	}
+#endif
+	return obj;
+}
+
+#define nouveau_object_create(p,e,c,s,d)                                       \
+	nouveau_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
+int  nouveau_object_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32, int size, void **);
+void nouveau_object_destroy(struct nouveau_object *);
+int  nouveau_object_init(struct nouveau_object *);
+int  nouveau_object_fini(struct nouveau_object *, bool suspend);
+
+extern struct nouveau_ofuncs nouveau_object_ofuncs;
+
+struct nouveau_oclass {
+	u32 handle;
+	struct nouveau_ofuncs *ofuncs;
+	struct nouveau_omthds *omthds;
+};
+
+#define nv_oclass(o)    nv_object(o)->oclass
+#define nv_hclass(o)    nv_oclass(o)->handle
+#define nv_iclass(o,i) (nv_hclass(o) & (i))
+#define nv_mclass(o)    nv_iclass(o, NV_OBJECT_CLASS)
+
+static inline struct nouveau_object *
+nv_pclass(struct nouveau_object *parent, u32 oclass)
+{
+	while (parent && !nv_iclass(parent, oclass))
+		parent = parent->parent;
+	return parent;
+}
+
+struct nouveau_omthds {
+	u32 method;
+	int (*call)(struct nouveau_object *, u32, void *, u32);
+};
+
+struct nouveau_ofuncs {
+	int  (*ctor)(struct nouveau_object *, struct nouveau_object *,
+		     struct nouveau_oclass *, void *data, u32 size,
+		     struct nouveau_object **);
+	void (*dtor)(struct nouveau_object *);
+	int  (*init)(struct nouveau_object *);
+	int  (*fini)(struct nouveau_object *, bool suspend);
+	u8   (*rd08)(struct nouveau_object *, u32 offset);
+	u16  (*rd16)(struct nouveau_object *, u32 offset);
+	u32  (*rd32)(struct nouveau_object *, u32 offset);
+	void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
+	void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
+	void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+};
+
+static inline struct nouveau_ofuncs *
+nv_ofuncs(void *obj)
+{
+	return nv_oclass(obj)->ofuncs;
+}
+
+int  nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, void *, u32,
+			 struct nouveau_object **);
+void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
+int nouveau_object_inc(struct nouveau_object *);
+int nouveau_object_dec(struct nouveau_object *, bool suspend);
+
+int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle,
+		       u16 oclass, void *data, u32 size,
+		       struct nouveau_object **);
+int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
+void nouveau_object_debug(void);
+
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+	struct nouveau_omthds *method = nv_oclass(obj)->omthds;
+
+	while (method && method->call) {
+		if (method->method == mthd)
+			return method->call(obj, mthd, &data, sizeof(data));
+		method++;
+	}
+
+	return -EINVAL;
+}
+
+static inline u8
+nv_ro08(void *obj, u32 addr)
+{
+	u8 data = nv_ofuncs(obj)->rd08(obj, addr);
+	nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
+	return data;
+}
+
+static inline u16
+nv_ro16(void *obj, u32 addr)
+{
+	u16 data = nv_ofuncs(obj)->rd16(obj, addr);
+	nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
+	return data;
+}
+
+static inline u32
+nv_ro32(void *obj, u32 addr)
+{
+	u32 data = nv_ofuncs(obj)->rd32(obj, addr);
+	nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
+	return data;
+}
+
+static inline void
+nv_wo08(void *obj, u32 addr, u8 data)
+{
+	nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
+	nv_ofuncs(obj)->wr08(obj, addr, data);
+}
+
+static inline void
+nv_wo16(void *obj, u32 addr, u16 data)
+{
+	nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
+	nv_ofuncs(obj)->wr16(obj, addr, data);
+}
+
+static inline void
+nv_wo32(void *obj, u32 addr, u32 data)
+{
+	nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
+	nv_ofuncs(obj)->wr32(obj, addr, data);
+}
+
+static inline u32
+nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+{
+	u32 temp = nv_ro32(obj, addr);
+	nv_wo32(obj, addr, (temp & ~mask) | data);
+	return temp;
+}
+
+static inline bool
+nv_strncmp(void *obj, u32 addr, u32 len, const char *str)
+{
+	while (len--) {
+		if (nv_ro08(obj, addr++) != *(str++))
+			return false;
+	}
+	return true;
+}
+
+#endif
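
nv_call() resolves a software method by walking the class's nouveau_omthds table until the method id matches, then handing the data to that entry's handler. The sketch below mirrors that table walk with simplified stand-in types (the demo_* names are invented; the real tables are the *_omthds arrays in the engine files above):

#include <stdint.h>
#include <stdio.h>

struct mthd {
	uint32_t method;
	int (*call)(uint32_t mthd, void *data, uint32_t size);
};

static int demo_flip(uint32_t mthd, void *data, uint32_t size)
{
	(void)data; (void)size;
	printf("flip requested via method 0x%04x\n", mthd);
	return 0;
}

static const struct mthd demo_omthds[] = {
	{ 0x0500, demo_flip },
	{ 0, NULL }	/* empty terminator, as in the nouveau_omthds tables */
};

static int demo_call(const struct mthd *m, uint32_t mthd, uint32_t data)
{
	for (; m && m->call; m++) {
		if (m->method == mthd)
			return m->call(mthd, &data, sizeof(data));
	}
	return -1;	/* the kernel version returns -EINVAL */
}

int main(void)
{
	return demo_call(demo_omthds, 0x0500, 0);
}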
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
new file mode 100644
index 000000000000..27074957fd21
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -0,0 +1,11 @@
+#ifndef __NOUVEAU_OPTION_H__
+#define __NOUVEAU_OPTION_H__
+
+#include <core/os.h>
+
+const char *nouveau_stropt(const char *optstr, const char *opt, int *len);
+bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
+
+int nouveau_dbgopt(const char *optstr, const char *sub);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
new file mode 100644
index 000000000000..d3aa251a5eb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -0,0 +1,64 @@
+#ifndef __NOUVEAU_PARENT_H__
+#define __NOUVEAU_PARENT_H__
+
+#include <core/device.h>
+#include <core/object.h>
+
+struct nouveau_sclass {
+	struct nouveau_sclass *sclass;
+	struct nouveau_engine *engine;
+	struct nouveau_oclass *oclass;
+};
+
+struct nouveau_parent {
+	struct nouveau_object base;
+
+	struct nouveau_sclass *sclass;
+	u32 engine;
+
+	int  (*context_attach)(struct nouveau_object *,
+			       struct nouveau_object *);
+	int  (*context_detach)(struct nouveau_object *, bool suspend,
+			       struct nouveau_object *);
+
+	int  (*object_attach)(struct nouveau_object *parent,
+			      struct nouveau_object *object, u32 name);
+	void (*object_detach)(struct nouveau_object *parent, int cookie);
+};
+
+static inline struct nouveau_parent *
+nv_parent(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
+		nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_parent_create(p,e,c,v,s,m,d)                                   \
+	nouveau_parent_create_((p), (e), (c), (v), (s), (m),                   \
+			       sizeof(**d), (void **)d)
+#define nouveau_parent_init(p)                                                 \
+	nouveau_object_init(&(p)->base)
+#define nouveau_parent_fini(p,s)                                               \
+	nouveau_object_fini(&(p)->base, (s))
+
+int  nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32 pclass,
+			    struct nouveau_oclass *, u64 engcls,
+			    int size, void **);
+void nouveau_parent_destroy(struct nouveau_parent *);
+
+int  _nouveau_parent_ctor(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, void *, u32,
+			  struct nouveau_object **);
+void _nouveau_parent_dtor(struct nouveau_object *);
+#define _nouveau_parent_init _nouveau_object_init
+#define _nouveau_parent_fini _nouveau_object_fini
+
+int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
+			  struct nouveau_object **pengine,
+			  struct nouveau_oclass **poclass);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
new file mode 100644
index 000000000000..1d629664f32d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -0,0 +1,39 @@
+#ifndef __NOUVEAU_PRINTK_H__
+#define __NOUVEAU_PRINTK_H__
+
+#include <core/os.h>
+#include <core/debug.h>
+
+struct nouveau_object;
+
+#define NV_PRINTK_FATAL    KERN_CRIT
+#define NV_PRINTK_ERROR    KERN_ERR
+#define NV_PRINTK_WARN     KERN_WARNING
+#define NV_PRINTK_INFO     KERN_INFO
+#define NV_PRINTK_DEBUG    KERN_DEBUG
+#define NV_PRINTK_PARANOIA KERN_DEBUG
+#define NV_PRINTK_TRACE    KERN_DEBUG
+#define NV_PRINTK_SPAM     KERN_DEBUG
+
+void nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
+
+#define nv_printk(o,l,f,a...) do {                                             \
+	if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG)                                \
+		nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a);   \
+} while(0)
+
+#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
+#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
+#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
+#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
+#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
+#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
+#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
+
+#define nv_assert(f,a...) do {                                                 \
+	if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG)                              \
+		nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a);  \
+	BUG_ON(1);                                                             \
+} while(0)
+
+#endif
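
The nv_printk() macro above discards at compile time any message whose level exceeds the build-time CONFIG_NOUVEAU_DEBUG value, and hands the rest to nv_printk_() together with a matching kernel log level. A minimal standalone sketch of the same compile-time gating pattern, using plain printf and made-up level names rather than anything from these headers:

#include <stdio.h>

#define DBG_ERROR 1
#define DBG_INFO  3
#define DBG_SPAM  7

/* stand-in for the build-time CONFIG_NOUVEAU_DEBUG setting */
#define BUILD_DEBUG_LEVEL DBG_INFO

/* messages above the build-time level become dead code and are optimised away */
#define dbg_printk(l, f, a...) do {                                \
	if (DBG_##l <= BUILD_DEBUG_LEVEL)                          \
		printf("[" #l "] " f, ##a);                        \
} while (0)

int main(void)
{
	dbg_printk(ERROR, "printed at this build level\n");
	dbg_printk(SPAM, "elided: %d\n", 42);
	return 0;
}
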
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ramht.h b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
new file mode 100644
index 000000000000..47e4cacbca37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
@@ -0,0 +1,23 @@
+#ifndef __NOUVEAU_RAMHT_H__
+#define __NOUVEAU_RAMHT_H__
+
+#include <core/gpuobj.h>
+
+struct nouveau_ramht {
+	struct nouveau_gpuobj base;
+	int bits;
+};
+
+int  nouveau_ramht_insert(struct nouveau_ramht *, int chid,
+			  u32 handle, u32 context);
+void nouveau_ramht_remove(struct nouveau_ramht *, int cookie);
+int  nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *,
+		       u32 size, u32 align, struct nouveau_ramht **);
+
+static inline void
+nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref)
+{
+	nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/subdev.h b/drivers/gpu/drm/nouveau/core/include/core/subdev.h
new file mode 100644
index 000000000000..e9632e931616
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/subdev.h
@@ -0,0 +1,118 @@
+#ifndef __NOUVEAU_SUBDEV_H__
+#define __NOUVEAU_SUBDEV_H__
+
+#include <core/object.h>
+
+#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
+#define NV_SUBDEV(name,var)  NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
+
+struct nouveau_subdev {
+	struct nouveau_object base;
+	struct mutex mutex;
+	const char *name;
+	void __iomem *mmio;
+	u32 debug;
+	u32 unit;
+
+	void (*intr)(struct nouveau_subdev *);
+};
+
+static inline struct nouveau_subdev *
+nv_subdev(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS)))
+		nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+static inline int
+nv_subidx(struct nouveau_object *object)
+{
+	return nv_hclass(nv_subdev(object)) & 0xff;
+}
+
+#define nouveau_subdev_create(p,e,o,v,s,f,d)                                   \
+	nouveau_subdev_create_((p), (e), (o), (v), (s), (f),                   \
+			       sizeof(**d),(void **)d)
+
+int  nouveau_subdev_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32 pclass,
+			    const char *sname, const char *fname,
+			    int size, void **);
+void nouveau_subdev_destroy(struct nouveau_subdev *);
+int  nouveau_subdev_init(struct nouveau_subdev *);
+int  nouveau_subdev_fini(struct nouveau_subdev *, bool suspend);
+void nouveau_subdev_reset(struct nouveau_object *);
+
+void _nouveau_subdev_dtor(struct nouveau_object *);
+int  _nouveau_subdev_init(struct nouveau_object *);
+int  _nouveau_subdev_fini(struct nouveau_object *, bool suspend);
+
+#define s_printk(s,l,f,a...) do {                                              \
+	if ((s)->debug >= NV_DBG_##l) {                                        \
+		nv_printk((s), l, f, ##a);                                     \
+	}                                                                      \
+} while(0)
+
+static inline u8
+nv_rd08(void *obj, u32 addr)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	u8 data = ioread8(subdev->mmio + addr);
+	nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
+	return data;
+}
+
+static inline u16
+nv_rd16(void *obj, u32 addr)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	u16 data = ioread16_native(subdev->mmio + addr);
+	nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
+	return data;
+}
+
+static inline u32
+nv_rd32(void *obj, u32 addr)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	u32 data = ioread32_native(subdev->mmio + addr);
+	nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
+	return data;
+}
+
+static inline void
+nv_wr08(void *obj, u32 addr, u8 data)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
+	iowrite8(data, subdev->mmio + addr);
+}
+
+static inline void
+nv_wr16(void *obj, u32 addr, u16 data)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
+	iowrite16_native(data, subdev->mmio + addr);
+}
+
+static inline void
+nv_wr32(void *obj, u32 addr, u32 data)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
+	iowrite32_native(data, subdev->mmio + addr);
+}
+
+static inline u32
+nv_mask(void *obj, u32 addr, u32 mask, u32 data)
+{
+	u32 temp = nv_rd32(obj, addr);
+	nv_wr32(obj, addr, (temp & ~mask) | data);
+	return temp;
+}
+
+#endif
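
nv_mask() above is a read-modify-write helper built on the nv_rd32()/nv_wr32() accessors: it clears the bits in mask, ORs in data and returns the previous register value so callers can save and restore state. The same pattern on a plain word, as a self-contained illustration with no MMIO or nouveau types involved:

#include <stdint.h>
#include <stdio.h>

/* mirrors the nv_mask() sequence on an ordinary variable */
static uint32_t mask32(uint32_t *reg, uint32_t mask, uint32_t data)
{
	uint32_t temp = *reg;             /* read */
	*reg = (temp & ~mask) | data;     /* modify + write */
	return temp;                      /* previous value */
}

int main(void)
{
	uint32_t reg = 0x12345678;
	uint32_t old = mask32(&reg, 0x0000ff00, 0x0000aa00);
	printf("old %08x new %08x\n", old, reg);  /* old 12345678 new 1234aa78 */
	return 0;
}
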
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
new file mode 100644
index 000000000000..75d1ed5f85fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_BSP_H__
+#define __NOUVEAU_BSP_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_bsp_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d)                            \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_bsp_context_destroy(d)                                         \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_bsp_context_init(d)                                            \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_bsp_context_fini(d,s)                                          \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
+#define _nouveau_bsp_context_init _nouveau_engctx_init
+#define _nouveau_bsp_context_fini _nouveau_engctx_fini
+#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_bsp {
+	struct nouveau_engine base;
+};
+
+#define nouveau_bsp_create(p,e,c,d)                                            \
+	nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
+#define nouveau_bsp_destroy(d)                                                 \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_bsp_init(d)                                                    \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_bsp_fini(d,s)                                                  \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_bsp_dtor _nouveau_engine_dtor
+#define _nouveau_bsp_init _nouveau_engine_init
+#define _nouveau_bsp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv84_bsp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
new file mode 100644
index 000000000000..70b9d8c5fcf5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -0,0 +1,49 @@
+#ifndef __NOUVEAU_COPY_H__
+#define __NOUVEAU_COPY_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_copy_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_copy_context_create(p,e,c,g,s,a,f,d)                           \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_copy_context_destroy(d)                                        \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_copy_context_init(d)                                           \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_copy_context_fini(d,s)                                         \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
+#define _nouveau_copy_context_init _nouveau_engctx_init
+#define _nouveau_copy_context_fini _nouveau_engctx_fini
+#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_copy {
+	struct nouveau_engine base;
+};
+
+#define nouveau_copy_create(p,e,c,y,i,d)                                       \
+	nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
+#define nouveau_copy_destroy(d)                                                \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_copy_init(d)                                                   \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_copy_fini(d,s)                                                 \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_copy_dtor _nouveau_engine_dtor
+#define _nouveau_copy_init _nouveau_engine_init
+#define _nouveau_copy_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nva3_copy_oclass;
+extern struct nouveau_oclass nvc0_copy0_oclass;
+extern struct nouveau_oclass nvc0_copy1_oclass;
+extern struct nouveau_oclass nve0_copy0_oclass;
+extern struct nouveau_oclass nve0_copy1_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
new file mode 100644
index 000000000000..e3674743baaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -0,0 +1,46 @@
+#ifndef __NOUVEAU_CRYPT_H__
+#define __NOUVEAU_CRYPT_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_crypt_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d)                          \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_crypt_context_destroy(d)                                       \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_crypt_context_init(d)                                          \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_crypt_context_fini(d,s)                                        \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
+#define _nouveau_crypt_context_init _nouveau_engctx_init
+#define _nouveau_crypt_context_fini _nouveau_engctx_fini
+#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_crypt {
+	struct nouveau_engine base;
+};
+
+#define nouveau_crypt_create(p,e,c,d)                                          \
+	nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
+#define nouveau_crypt_destroy(d)                                               \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_crypt_init(d)                                                  \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_crypt_fini(d,s)                                                \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_crypt_dtor _nouveau_engine_dtor
+#define _nouveau_crypt_init _nouveau_engine_init
+#define _nouveau_crypt_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv84_crypt_oclass;
+extern struct nouveau_oclass nv98_crypt_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
new file mode 100644
index 000000000000..38ec1252cbaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -0,0 +1,44 @@
+#ifndef __NOUVEAU_DISP_H__
+#define __NOUVEAU_DISP_H__
+
+#include <core/object.h>
+#include <core/engine.h>
+#include <core/device.h>
+
+struct nouveau_disp {
+	struct nouveau_engine base;
+
+	struct {
+		struct list_head list;
+		spinlock_t lock;
+		void (*notify)(void *, int);
+		void (*get)(void *, int);
+		void (*put)(void *, int);
+		void *data;
+	} vblank;
+};
+
+static inline struct nouveau_disp *
+nouveau_disp(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
+}
+
+#define nouveau_disp_create(p,e,c,i,x,d)                                       \
+	nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
+#define nouveau_disp_destroy(d)                                                \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_disp_init(d)                                                   \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_disp_fini(d,s)                                                 \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_disp_dtor _nouveau_engine_dtor
+#define _nouveau_disp_init _nouveau_engine_init
+#define _nouveau_disp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_disp_oclass;
+extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nvd0_disp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
new file mode 100644
index 000000000000..700ccbb1941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -0,0 +1,57 @@
+#ifndef __NOUVEAU_DMAOBJ_H__
+#define __NOUVEAU_DMAOBJ_H__
+
+#include <core/object.h>
+#include <core/engine.h>
+
+struct nouveau_gpuobj;
+
+struct nouveau_dmaobj {
+	struct nouveau_object base;
+	u32 target;
+	u32 access;
+	u64 start;
+	u64 limit;
+};
+
+#define nouveau_dmaobj_create(p,e,c,a,s,d)                                     \
+	nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
+#define nouveau_dmaobj_destroy(p)                                              \
+	nouveau_object_destroy(&(p)->base)
+#define nouveau_dmaobj_init(p)                                                 \
+	nouveau_object_init(&(p)->base)
+#define nouveau_dmaobj_fini(p,s)                                               \
+	nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, void *data, u32 size,
+			   int length, void **);
+
+#define _nouveau_dmaobj_dtor nouveau_object_destroy
+#define _nouveau_dmaobj_init nouveau_object_init
+#define _nouveau_dmaobj_fini nouveau_object_fini
+
+struct nouveau_dmaeng {
+	struct nouveau_engine base;
+	int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
+		    struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+};
+
+#define nouveau_dmaeng_create(p,e,c,d)                                         \
+	nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
+#define nouveau_dmaeng_destroy(p)                                              \
+	nouveau_engine_destroy(&(p)->base)
+#define nouveau_dmaeng_init(p)                                                 \
+	nouveau_engine_init(&(p)->base)
+#define nouveau_dmaeng_fini(p,s)                                               \
+	nouveau_engine_fini(&(p)->base, (s))
+
+#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
+#define _nouveau_dmaeng_init _nouveau_engine_init
+#define _nouveau_dmaeng_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_dmaeng_oclass;
+extern struct nouveau_oclass nv50_dmaeng_oclass;
+extern struct nouveau_oclass nvc0_dmaeng_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
new file mode 100644
index 000000000000..d67fed1e3970
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -0,0 +1,111 @@
+#ifndef __NOUVEAU_FIFO_H__
+#define __NOUVEAU_FIFO_H__
+
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engine.h>
+
+struct nouveau_fifo_chan {
+	struct nouveau_namedb base;
+	struct nouveau_dmaobj *pushdma;
+	struct nouveau_gpuobj *pushgpu;
+	void __iomem *user;
+	u32 size;
+	u16 chid;
+	atomic_t refcnt; /* NV04_NVSW_SET_REF */
+};
+
+static inline struct nouveau_fifo_chan *
+nouveau_fifo_chan(void *obj)
+{
+	return (void *)nv_namedb(obj);
+}
+
+#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d)                         \
+	nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n),        \
+				     (m), sizeof(**d), (void **)d)
+#define nouveau_fifo_channel_init(p)                                           \
+	nouveau_namedb_init(&(p)->base)
+#define nouveau_fifo_channel_fini(p,s)                                         \
+	nouveau_namedb_fini(&(p)->base, (s))
+
+int  nouveau_fifo_channel_create_(struct nouveau_object *,
+				  struct nouveau_object *,
+				  struct nouveau_oclass *,
+				  int bar, u32 addr, u32 size, u32 push,
+				  u32 engmask, int len, void **);
+void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
+
+#define _nouveau_fifo_channel_init _nouveau_namedb_init
+#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
+
+void _nouveau_fifo_channel_dtor(struct nouveau_object *);
+u32  _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+
+struct nouveau_fifo_base {
+	struct nouveau_gpuobj base;
+};
+
+#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d)                           \
+	nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
+#define nouveau_fifo_context_destroy(p)                                        \
+	nouveau_gpuobj_destroy(&(p)->base)
+#define nouveau_fifo_context_init(p)                                           \
+	nouveau_gpuobj_init(&(p)->base)
+#define nouveau_fifo_context_fini(p,s)                                         \
+	nouveau_gpuobj_fini(&(p)->base, (s))
+
+#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor
+#define _nouveau_fifo_context_init _nouveau_gpuobj_init
+#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini
+#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32
+#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32
+
+struct nouveau_fifo {
+	struct nouveau_engine base;
+
+	struct nouveau_object **channel;
+	spinlock_t lock;
+	u16 min;
+	u16 max;
+
+	int  (*chid)(struct nouveau_fifo *, struct nouveau_object *);
+	void (*pause)(struct nouveau_fifo *, unsigned long *);
+	void (*start)(struct nouveau_fifo *, unsigned long *);
+};
+
+static inline struct nouveau_fifo *
+nouveau_fifo(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO];
+}
+
+#define nouveau_fifo_create(o,e,c,fc,lc,d)                                     \
+	nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
+#define nouveau_fifo_init(p)                                                   \
+	nouveau_engine_init(&(p)->base)
+#define nouveau_fifo_fini(p,s)                                                 \
+	nouveau_engine_fini(&(p)->base, (s))
+
+int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, int min, int max,
+			 int size, void **);
+void nouveau_fifo_destroy(struct nouveau_fifo *);
+
+#define _nouveau_fifo_init _nouveau_engine_init
+#define _nouveau_fifo_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_fifo_oclass;
+extern struct nouveau_oclass nv10_fifo_oclass;
+extern struct nouveau_oclass nv17_fifo_oclass;
+extern struct nouveau_oclass nv40_fifo_oclass;
+extern struct nouveau_oclass nv50_fifo_oclass;
+extern struct nouveau_oclass nv84_fifo_oclass;
+extern struct nouveau_oclass nvc0_fifo_oclass;
+extern struct nouveau_oclass nve0_fifo_oclass;
+
+void nv04_fifo_intr(struct nouveau_subdev *);
+int  nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
new file mode 100644
index 000000000000..6943b40d0817
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -0,0 +1,72 @@
+#ifndef __NOUVEAU_GRAPH_H__
+#define __NOUVEAU_GRAPH_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+struct nouveau_graph_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_graph_context_create(p,e,c,g,s,a,f,d)                          \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_graph_context_destroy(d)                                       \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_graph_context_init(d)                                          \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_graph_context_fini(d,s)                                        \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_graph_context_dtor _nouveau_engctx_dtor
+#define _nouveau_graph_context_init _nouveau_engctx_init
+#define _nouveau_graph_context_fini _nouveau_engctx_fini
+#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_graph {
+	struct nouveau_engine base;
+};
+
+static inline struct nouveau_graph *
+nouveau_graph(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR];
+}
+
+#define nouveau_graph_create(p,e,c,y,d)                                        \
+	nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
+#define nouveau_graph_destroy(d)                                               \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_graph_init(d)                                                  \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_graph_fini(d,s)                                                \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_graph_dtor _nouveau_engine_dtor
+#define _nouveau_graph_init _nouveau_engine_init
+#define _nouveau_graph_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_graph_oclass;
+extern struct nouveau_oclass nv10_graph_oclass;
+extern struct nouveau_oclass nv20_graph_oclass;
+extern struct nouveau_oclass nv25_graph_oclass;
+extern struct nouveau_oclass nv2a_graph_oclass;
+extern struct nouveau_oclass nv30_graph_oclass;
+extern struct nouveau_oclass nv34_graph_oclass;
+extern struct nouveau_oclass nv35_graph_oclass;
+extern struct nouveau_oclass nv40_graph_oclass;
+extern struct nouveau_oclass nv50_graph_oclass;
+extern struct nouveau_oclass nvc0_graph_oclass;
+extern struct nouveau_oclass nve0_graph_oclass;
+
+extern const struct nouveau_bitfield nv04_graph_nsource[];
+extern struct nouveau_ofuncs nv04_graph_ofuncs;
+bool nv04_graph_idle(void *obj);
+
+extern const struct nouveau_bitfield nv10_graph_intr_name[];
+extern const struct nouveau_bitfield nv10_graph_nstatus[];
+
+extern const struct nouveau_enum nv50_data_error_names[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
new file mode 100644
index 000000000000..bbf0d4a5bbd7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -0,0 +1,61 @@
+#ifndef __NOUVEAU_MPEG_H__
+#define __NOUVEAU_MPEG_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_mpeg_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d)                           \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_mpeg_context_destroy(d)                                        \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_mpeg_context_init(d)                                           \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_mpeg_context_fini(d,s)                                         \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor
+#define _nouveau_mpeg_context_init _nouveau_engctx_init
+#define _nouveau_mpeg_context_fini _nouveau_engctx_fini
+#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_mpeg {
+	struct nouveau_engine base;
+};
+
+#define nouveau_mpeg_create(p,e,c,d)                                           \
+	nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
+#define nouveau_mpeg_destroy(d)                                                \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_mpeg_init(d)                                                   \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_mpeg_fini(d,s)                                                 \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_mpeg_dtor _nouveau_engine_dtor
+#define _nouveau_mpeg_init _nouveau_engine_init
+#define _nouveau_mpeg_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv31_mpeg_oclass;
+extern struct nouveau_oclass nv40_mpeg_oclass;
+extern struct nouveau_oclass nv50_mpeg_oclass;
+extern struct nouveau_oclass nv84_mpeg_oclass;
+
+extern struct nouveau_oclass nv31_mpeg_sclass[];
+void nv31_mpeg_intr(struct nouveau_subdev *);
+void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
+int  nv31_mpeg_init(struct nouveau_object *);
+
+extern struct nouveau_ofuncs nv50_mpeg_ofuncs;
+int  nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, void *, u32,
+			    struct nouveau_object **);
+int  nv50_mpeg_tlb_flush(struct nouveau_engine *);
+void nv50_mpeg_intr(struct nouveau_subdev *);
+int  nv50_mpeg_init(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
new file mode 100644
index 000000000000..74d554fb3281
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_PPP_H__
+#define __NOUVEAU_PPP_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_ppp_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d)                            \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_ppp_context_destroy(d)                                         \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_ppp_context_init(d)                                            \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_ppp_context_fini(d,s)                                          \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
+#define _nouveau_ppp_context_init _nouveau_engctx_init
+#define _nouveau_ppp_context_fini _nouveau_engctx_fini
+#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_ppp {
+	struct nouveau_engine base;
+};
+
+#define nouveau_ppp_create(p,e,c,d)                                            \
+	nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
+#define nouveau_ppp_destroy(d)                                                 \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_ppp_init(d)                                                    \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_ppp_fini(d,s)                                                  \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_ppp_dtor _nouveau_engine_dtor
+#define _nouveau_ppp_init _nouveau_engine_init
+#define _nouveau_ppp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv98_ppp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
new file mode 100644
index 000000000000..c945691c8564
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -0,0 +1,60 @@
+#ifndef __NOUVEAU_SOFTWARE_H__
+#define __NOUVEAU_SOFTWARE_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_software_chan {
+	struct nouveau_engctx base;
+
+	struct {
+		struct list_head head;
+		u32 channel;
+		u32 ctxdma;
+		u64 offset;
+		u32 value;
+		u32 crtc;
+	} vblank;
+
+	int (*flip)(void *);
+	void *flip_data;
+};
+
+#define nouveau_software_context_create(p,e,c,d)                               \
+	nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
+#define nouveau_software_context_destroy(d)                                    \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_software_context_init(d)                                       \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_software_context_fini(d,s)                                     \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_software_context_dtor _nouveau_engctx_dtor
+#define _nouveau_software_context_init _nouveau_engctx_init
+#define _nouveau_software_context_fini _nouveau_engctx_fini
+
+struct nouveau_software {
+	struct nouveau_engine base;
+};
+
+#define nouveau_software_create(p,e,c,d)                                       \
+	nouveau_engine_create((p), (e), (c), true, "SW", "software", (d))
+#define nouveau_software_destroy(d)                                            \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_software_init(d)                                               \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_software_fini(d,s)                                             \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_software_dtor _nouveau_engine_dtor
+#define _nouveau_software_init _nouveau_engine_init
+#define _nouveau_software_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_software_oclass;
+extern struct nouveau_oclass nv10_software_oclass;
+extern struct nouveau_oclass nv50_software_oclass;
+extern struct nouveau_oclass nvc0_software_oclass;
+
+void nv04_software_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
new file mode 100644
index 000000000000..05cd08fba377
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_VP_H__
+#define __NOUVEAU_VP_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_vp_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_vp_context_create(p,e,c,g,s,a,f,d)                             \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_vp_context_destroy(d)                                          \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_vp_context_init(d)                                             \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_vp_context_fini(d,s)                                           \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
+#define _nouveau_vp_context_init _nouveau_engctx_init
+#define _nouveau_vp_context_fini _nouveau_engctx_fini
+#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_vp {
+	struct nouveau_engine base;
+};
+
+#define nouveau_vp_create(p,e,c,d)                                             \
+	nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
+#define nouveau_vp_destroy(d)                                                  \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_vp_init(d)                                                     \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_vp_fini(d,s)                                                   \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_vp_dtor _nouveau_engine_dtor
+#define _nouveau_vp_init _nouveau_engine_init
+#define _nouveau_vp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv84_vp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
new file mode 100644
index 000000000000..4f4ff4502c3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -0,0 +1,55 @@
+#ifndef __NOUVEAU_BAR_H__
+#define __NOUVEAU_BAR_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#include <subdev/fb.h>
+
+struct nouveau_vma;
+
+struct nouveau_bar {
+	struct nouveau_subdev base;
+
+	int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
+		     struct nouveau_mem *, struct nouveau_object **);
+	void __iomem *iomem;
+
+	int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
+		    u32 flags, struct nouveau_vma *);
+	int (*umap)(struct nouveau_bar *, struct nouveau_mem *,
+		    u32 flags, struct nouveau_vma *);
+	void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
+	void (*flush)(struct nouveau_bar *);
+};
+
+static inline struct nouveau_bar *
+nouveau_bar(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
+}
+
+#define nouveau_bar_create(p,e,o,d)                                            \
+	nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_bar_init(p)                                                    \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_bar_fini(p,s)                                                  \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, int, void **);
+void nouveau_bar_destroy(struct nouveau_bar *);
+
+void _nouveau_bar_dtor(struct nouveau_object *);
+#define _nouveau_bar_init _nouveau_subdev_init
+#define _nouveau_bar_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv50_bar_oclass;
+extern struct nouveau_oclass nvc0_bar_oclass;
+
+int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
+		      struct nouveau_mem *, struct nouveau_object **);
+
+void nv84_bar_flush(struct nouveau_bar *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
new file mode 100644
index 000000000000..d145b25e6be4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -0,0 +1,34 @@
+#ifndef __NOUVEAU_BIOS_H__
+#define __NOUVEAU_BIOS_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_bios {
+	struct nouveau_subdev base;
+	u32 size;
+	u8 *data;
+
+	u32 bmp_offset;
+	u32 bit_offset;
+
+	struct {
+		u8 major;
+		u8 chip;
+		u8 minor;
+		u8 micro;
+	} version;
+};
+
+static inline struct nouveau_bios *
+nouveau_bios(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VBIOS];
+}
+
+u8  nvbios_checksum(const u8 *data, int size);
+u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
+
+extern struct nouveau_oclass nouveau_bios_oclass;
+
+#endif
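
nvbios_checksum() is only declared here; the usual VBIOS convention (assumed below, not stated by this header) is that every byte of a valid image sums to zero modulo 256, with one byte reserved as the fix-up. A standalone sketch of that convention:

#include <stdint.h>
#include <stdio.h>

/* assumed convention: a valid image's bytes sum to 0x00 modulo 256 */
static uint8_t byte_checksum(const uint8_t *data, int size)
{
	uint8_t sum = 0;
	while (size--)
		sum += *data++;
	return sum;
}

int main(void)
{
	uint8_t image[4] = { 0x55, 0xaa, 0x10, 0x00 };

	image[3] = (uint8_t)(0x100 - byte_checksum(image, 3));   /* fix-up byte */
	printf("checksum: 0x%02x\n", byte_checksum(image, 4));   /* prints 0x00 */
	return 0;
}
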
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
new file mode 100644
index 000000000000..73f060b07981
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
@@ -0,0 +1,13 @@
+#ifndef __NVBIOS_BIT_H__
+#define __NVBIOS_BIT_H__
+
+struct bit_entry {
+	u8  id;
+	u8  version;
+	u16 length;
+	u16 offset;
+};
+
+int bit_entry(struct nouveau_bios *, u8 id, struct bit_entry *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
new file mode 100644
index 000000000000..10e4dbca649a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
@@ -0,0 +1,39 @@
+#ifndef __NVBIOS_BMP_H__
+#define __NVBIOS_BMP_H__
+
+static inline u16
+bmp_version(struct nouveau_bios *bios)
+{
+	if (bios->bmp_offset) {
+		return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
+		       nv_ro08(bios, bios->bmp_offset + 6);
+	}
+
+	return 0x0000;
+}
+
+static inline u16
+bmp_mem_init_table(struct nouveau_bios *bios)
+{
+	if (bmp_version(bios) >= 0x0300)
+		return nv_ro16(bios, bios->bmp_offset + 24);
+	return 0x0000;
+}
+
+static inline u16
+bmp_sdr_seq_table(struct nouveau_bios *bios)
+{
+	if (bmp_version(bios) >= 0x0300)
+		return nv_ro16(bios, bios->bmp_offset + 26);
+	return 0x0000;
+}
+
+static inline u16
+bmp_ddr_seq_table(struct nouveau_bios *bios)
+{
+	if (bmp_version(bios) >= 0x0300)
+		return nv_ro16(bios, bios->bmp_offset + 28);
+	return 0x0000;
+}
+
+#endif
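
bmp_version() packs the byte at bmp_offset+5 as the major and the byte at +6 as the minor revision into one 16-bit value, and the three table helpers only read their pointers for BMP 3.x and newer. A standalone illustration of the packing and the 0x0300 gate, using local bytes in place of the nv_ro08()/nv_ro16() BIOS accessors (the example values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pretend these two bytes sit at bmp_offset+5 and bmp_offset+6 */
	uint8_t major = 0x05, minor = 0x10;
	uint16_t version = (uint16_t)(major << 8 | minor);   /* 0x0510 */

	if (version >= 0x0300)
		printf("BMP %x.%02x: table pointers at +24/+26/+28 apply\n",
		       major, minor);
	return 0;
}
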
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
new file mode 100644
index 000000000000..c1270548fd0d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
@@ -0,0 +1,27 @@
+#ifndef __NVBIOS_CONN_H__
+#define __NVBIOS_CONN_H__
+
+enum dcb_connector_type {
+	DCB_CONNECTOR_VGA = 0x00,
+	DCB_CONNECTOR_TV_0 = 0x10,
+	DCB_CONNECTOR_TV_1 = 0x11,
+	DCB_CONNECTOR_TV_3 = 0x13,
+	DCB_CONNECTOR_DVI_I = 0x30,
+	DCB_CONNECTOR_DVI_D = 0x31,
+	DCB_CONNECTOR_DMS59_0 = 0x38,
+	DCB_CONNECTOR_DMS59_1 = 0x39,
+	DCB_CONNECTOR_LVDS = 0x40,
+	DCB_CONNECTOR_LVDS_SPWG = 0x41,
+	DCB_CONNECTOR_DP = 0x46,
+	DCB_CONNECTOR_eDP = 0x47,
+	DCB_CONNECTOR_HDMI_0 = 0x60,
+	DCB_CONNECTOR_HDMI_1 = 0x61,
+	DCB_CONNECTOR_DMS59_DP0 = 0x64,
+	DCB_CONNECTOR_DMS59_DP1 = 0x65,
+	DCB_CONNECTOR_NONE = 0xff
+};
+
+u16 dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
new file mode 100644
index 000000000000..d682fb625833
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -0,0 +1,90 @@
+#ifndef __NVBIOS_DCB_H__
+#define __NVBIOS_DCB_H__
+
+struct nouveau_bios;
+
+enum dcb_output_type {
+	DCB_OUTPUT_ANALOG	= 0x0,
+	DCB_OUTPUT_TV		= 0x1,
+	DCB_OUTPUT_TMDS		= 0x2,
+	DCB_OUTPUT_LVDS		= 0x3,
+	DCB_OUTPUT_DP		= 0x6,
+	DCB_OUTPUT_EOL		= 0xe,
+	DCB_OUTPUT_UNUSED	= 0xf,
+	DCB_OUTPUT_ANY = -1,
+};
+
+struct dcb_output {
+	int index;	/* may not be raw dcb index if merging has happened */
+	enum dcb_output_type type;
+	uint8_t i2c_index;
+	uint8_t heads;
+	uint8_t connector;
+	uint8_t bus;
+	uint8_t location;
+	uint8_t or;
+	bool duallink_possible;
+	union {
+		struct sor_conf {
+			int link;
+		} sorconf;
+		struct {
+			int maxfreq;
+		} crtconf;
+		struct {
+			struct sor_conf sor;
+			bool use_straps_for_mode;
+			bool use_acpi_for_edid;
+			bool use_power_scripts;
+		} lvdsconf;
+		struct {
+			bool has_component_output;
+		} tvconf;
+		struct {
+			struct sor_conf sor;
+			int link_nr;
+			int link_bw;
+		} dpconf;
+		struct {
+			struct sor_conf sor;
+			int slave_addr;
+		} tmdsconf;
+	};
+	bool i2c_upper_default;
+};
+
+u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
+u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
+		     (struct nouveau_bios *, void *, int index, u16 entry));
+
+
+/* BIT 'U'/'d' table encoder subtables have hashes matching them to
+ * a particular set of encoders.
+ *
+ * This function returns true if a particular DCB entry matches.
+ */
+static inline bool
+dcb_hash_match(struct dcb_output *dcb, u32 hash)
+{
+	if ((hash & 0x000000f0) != (dcb->location << 4))
+		return false;
+	if ((hash & 0x0000000f) != dcb->type)
+		return false;
+	if (!(hash & (dcb->or << 16)))
+		return false;
+
+	switch (dcb->type) {
+	case DCB_OUTPUT_TMDS:
+	case DCB_OUTPUT_LVDS:
+	case DCB_OUTPUT_DP:
+		if (hash & 0x00c00000) {
+			if (!(hash & (dcb->sorconf.link << 22)))
+				return false;
+		}
+	default:
+		return true;
+	}
+}
+
+#endif
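
dcb_hash_match() above decodes the hash as: bits 3:0 output type, bits 7:4 location, an OR bitmask starting at bit 16, and (for TMDS/LVDS/DP, when bits 23:22 are non-zero) a SOR link mask at bits 23:22. A self-contained sketch of the same decode with a stripped-down stand-in for struct dcb_output; the hash value is made up purely to demonstrate a match:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* minimal stand-in for struct dcb_output, just the fields the hash checks */
struct outp {
	uint8_t type;      /* 0x2 = TMDS, 0x3 = LVDS, 0x6 = DP */
	uint8_t location;  /* 0 = on-chip */
	uint8_t or;        /* OR bitmask */
	uint8_t link;      /* sorconf.link mask */
};

/* same logic as dcb_hash_match() */
static bool hash_match(const struct outp *o, uint32_t hash)
{
	if ((hash & 0x000000f0) != (uint32_t)(o->location << 4))
		return false;
	if ((hash & 0x0000000f) != o->type)
		return false;
	if (!(hash & ((uint32_t)o->or << 16)))
		return false;
	/* TMDS/LVDS/DP additionally match the link mask when bits 23:22 are set */
	if ((o->type == 0x2 || o->type == 0x3 || o->type == 0x6) &&
	    (hash & 0x00c00000) && !(hash & ((uint32_t)o->link << 22)))
		return false;
	return true;
}

int main(void)
{
	struct outp tmds = { .type = 0x2, .location = 0, .or = 0x1, .link = 0x1 };

	/* hypothetical hash: type TMDS, on-chip, OR bit 16 set, link bit 22 set */
	printf("match: %d\n", hash_match(&tmds, 0x00410002));  /* prints 1 */
	return 0;
}
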
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
new file mode 100644
index 000000000000..73b5e5d3e75a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -0,0 +1,8 @@
+#ifndef __NVBIOS_DP_H__
+#define __NVBIOS_DP_H__
+
+u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
new file mode 100644
index 000000000000..949fee3af8fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
@@ -0,0 +1,30 @@
+#ifndef __NVBIOS_EXTDEV_H__
+#define __NVBIOS_EXTDEV_H__
+
+struct nouveau_bios;
+
+enum nvbios_extdev_type {
+	NVBIOS_EXTDEV_LM89		= 0x02,
+	NVBIOS_EXTDEV_VT1103M		= 0x40,
+	NVBIOS_EXTDEV_PX3540		= 0x41,
+	NVBIOS_EXTDEV_VT1105M		= 0x42, /* or close enough... */
+	NVBIOS_EXTDEV_ADT7473		= 0x70, /* can also be a LM64 */
+	NVBIOS_EXTDEV_HDCP_EEPROM	= 0x90,
+	NVBIOS_EXTDEV_NONE		= 0xff,
+};
+
+struct nvbios_extdev_func {
+	u8 type;
+	u8 addr;
+	u8 bus;
+};
+
+int
+nvbios_extdev_parse(struct nouveau_bios *, int, struct nvbios_extdev_func *);
+
+int
+nvbios_extdev_find(struct nouveau_bios *, enum nvbios_extdev_type,
+		   struct nvbios_extdev_func *);
+
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
new file mode 100644
index 000000000000..2bf178082a36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -0,0 +1,33 @@
+#ifndef __NVBIOS_GPIO_H__
+#define __NVBIOS_GPIO_H__
+
+struct nouveau_bios;
+
+enum dcb_gpio_func_name {
+	DCB_GPIO_PANEL_POWER = 0x01,
+	DCB_GPIO_TVDAC0 = 0x0c,
+	DCB_GPIO_TVDAC1 = 0x2d,
+	DCB_GPIO_PWM_FAN = 0x09,
+	DCB_GPIO_FAN_SENSE = 0x3d,
+	DCB_GPIO_UNUSED = 0xff
+};
+
+struct dcb_gpio_func {
+	u8 func;
+	u8 line;
+	u8 log[2];
+
+	/* so far, "param" seems to only have an influence on PWM-related
+	 * GPIOs such as FAN_CONTROL and PANEL_BACKLIGHT_LEVEL.
+	 * if param equals 1, hardware PWM is available
+	 * if param equals 0, the host should toggle the GPIO itself
+	 */
+	u8 param;
+};
+
+u16 dcb_gpio_table(struct nouveau_bios *);
+u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver);
+int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line,
+		   struct dcb_gpio_func *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
new file mode 100644
index 000000000000..5079bedfd985
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -0,0 +1,25 @@
+#ifndef __NVBIOS_I2C_H__
+#define __NVBIOS_I2C_H__
+
+struct nouveau_bios;
+
+enum dcb_i2c_type {
+	DCB_I2C_NV04_BIT = 0,
+	DCB_I2C_NV4E_BIT = 4,
+	DCB_I2C_NVIO_BIT = 5,
+	DCB_I2C_NVIO_AUX = 6,
+	DCB_I2C_UNUSED = 0xff
+};
+
+struct dcb_i2c_entry {
+	enum dcb_i2c_type type;
+	u8 drive;
+	u8 sense;
+	u32 data;
+};
+
+u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_i2c_entry(struct nouveau_bios *, u8 index, u8 *ver, u8 *len);
+int dcb_i2c_parse(struct nouveau_bios *, u8 index, struct dcb_i2c_entry *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
new file mode 100644
index 000000000000..e69a8bdc6e97
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -0,0 +1,21 @@
+#ifndef __NVBIOS_INIT_H__
+#define __NVBIOS_INIT_H__
+
+struct nvbios_init {
+	struct nouveau_subdev *subdev;
+	struct nouveau_bios *bios;
+	u16 offset;
+	struct dcb_output *outp;
+	int crtc;
+
+	/* internal state used during parsing */
+	u8 execute;
+	u32 nested;
+	u16 repeat;
+	u16 repend;
+};
+
+int nvbios_exec(struct nvbios_init *);
+int nvbios_init(struct nouveau_subdev *, bool execute);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
new file mode 100644
index 000000000000..5572e60414e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
@@ -0,0 +1,9 @@
+#ifndef __NVBIOS_MXM_H__
+#define __NVBIOS_MXM_H__
+
+u16 mxm_table(struct nouveau_bios *, u8 *ver, u8 *hdr);
+
+u8  mxm_sor_map(struct nouveau_bios *, u8 conn);
+u8  mxm_ddc_map(struct nouveau_bios *, u8 port);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
new file mode 100644
index 000000000000..0b285e99be5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -0,0 +1,14 @@
+#ifndef __NVBIOS_PERF_H__
+#define __NVBIOS_PERF_H__
+
+struct nouveau_bios;
+
+struct nvbios_perf_fan {
+	u32 pwm_divisor;
+};
+
+int
+nvbios_perf_fan_parse(struct nouveau_bios *, struct nvbios_perf_fan *);
+
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
new file mode 100644
index 000000000000..c345097592f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -0,0 +1,77 @@
+#ifndef __NVBIOS_PLL_H__
+#define __NVBIOS_PLL_H__
+
+/*XXX: kill me */
+struct nouveau_pll_vals {
+	union {
+		struct {
+#ifdef __BIG_ENDIAN
+			uint8_t N1, M1, N2, M2;
+#else
+			uint8_t M1, N1, M2, N2;
+#endif
+		};
+		struct {
+			uint16_t NM1, NM2;
+		} __attribute__((packed));
+	};
+	int log2P;
+
+	int refclk;
+};
+
+struct nouveau_bios;
+
+/* these match types in pll limits table version 0x40;
+ * nouveau uses them on all chipsets internally where a
+ * specific pll needs to be referenced, but the exact
+ * register isn't known.
+ */
+enum nvbios_pll_type {
+	PLL_CORE   = 0x01,
+	PLL_SHADER = 0x02,
+	PLL_UNK03  = 0x03,
+	PLL_MEMORY = 0x04,
+	PLL_VDEC   = 0x05,
+	PLL_UNK40  = 0x40,
+	PLL_UNK41  = 0x41,
+	PLL_UNK42  = 0x42,
+	PLL_VPLL0  = 0x80,
+	PLL_VPLL1  = 0x81,
+	PLL_MAX    = 0xff
+};
+
+struct nvbios_pll {
+	enum nvbios_pll_type type;
+	u32 reg;
+	u32 refclk;
+
+	u8 min_p;
+	u8 max_p;
+	u8 bias_p;
+
+	/*
+	 * for most pre nv50 cards setting a log2P of 7 (the common max_log2p
+	 * value) is no different to 6 (at least for vplls) so allowing the MNP
+	 * calc to use 7 causes the generated clock to be out by a factor of 2.
+	 * however, max_log2p cannot be fixed-up during parsing as the
+	 * unmodified max_log2p value is still needed for setting mplls, hence
+	 * an additional max_usable_log2p member
+	 */
+	u8 max_p_usable;
+
+	struct {
+		u32 min_freq;
+		u32 max_freq;
+		u32 min_inputfreq;
+		u32 max_inputfreq;
+		u8  min_m;
+		u8  max_m;
+		u8  min_n;
+		u8  max_n;
+	} vco1, vco2;
+};
+
+int nvbios_pll_parse(struct nouveau_bios *, u32 type, struct nvbios_pll *);
+
+#endif
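
The max_p_usable comment above concerns the post-divider: taking the usual single-stage relationship f = refclk * N / (M * 2^log2P) as a working assumption, hardware that treats log2P = 7 as 6 halves the divider, so a clock computed for P = 7 comes out twice as fast as intended. A small standalone computation with arbitrary example numbers:

#include <stdio.h>

int main(void)
{
	/* arbitrary values for f = refclk * N / (M * 2^P), refclk in kHz */
	unsigned refclk = 27000, N = 128, M = 1;

	unsigned intended = refclk * N / (M * (1u << 7));  /* calculator picked P = 7 */
	unsigned actual   = refclk * N / (M * (1u << 6));  /* hardware behaves as P = 6 */

	printf("intended %u kHz, actual %u kHz\n", intended, actual);  /* 27000 vs 54000 */
	return 0;
}
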
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
new file mode 100644
index 000000000000..a2c4296fc5f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -0,0 +1,46 @@
+#ifndef __NVBIOS_THERM_H__
+#define __NVBIOS_THERM_H__
+
+struct nouveau_bios;
+
+struct nvbios_therm_threshold {
+	u8 temp;
+	u8 hysteresis;
+};
+
+struct nvbios_therm_sensor {
+	/* diode */
+	s16 slope_mult;
+	s16 slope_div;
+	s16 offset_num;
+	s16 offset_den;
+	s8 offset_constant;
+
+	/* thresholds */
+	struct nvbios_therm_threshold thrs_fan_boost;
+	struct nvbios_therm_threshold thrs_down_clock;
+	struct nvbios_therm_threshold thrs_critical;
+	struct nvbios_therm_threshold thrs_shutdown;
+};
+
+struct nvbios_therm_fan {
+	u16 pwm_freq;
+
+	u8 min_duty;
+	u8 max_duty;
+};
+
+enum nvbios_therm_domain {
+	NVBIOS_THERM_DOMAIN_CORE,
+	NVBIOS_THERM_DOMAIN_AMBIENT,
+};
+
+int
+nvbios_therm_sensor_parse(struct nouveau_bios *, enum nvbios_therm_domain,
+			  struct nvbios_therm_sensor *);
+
+int
+nvbios_therm_fan_parse(struct nouveau_bios *, struct nvbios_therm_fan *);
+
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
new file mode 100644
index 000000000000..39e73b91d360
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -0,0 +1,59 @@
+#ifndef __NOUVEAU_CLOCK_H__
+#define __NOUVEAU_CLOCK_H__
+
+#include <core/device.h>
+#include <core/subdev.h>
+
+struct nouveau_pll_vals;
+struct nvbios_pll;
+
+struct nouveau_clock {
+	struct nouveau_subdev base;
+
+	int (*pll_set)(struct nouveau_clock *, u32 type, u32 freq);
+
+	/*XXX: die, these are here *only* to support the completely
+	 *     bat-shit insane what-was-nouveau_hw.c code
+	 */
+	int (*pll_calc)(struct nouveau_clock *, struct nvbios_pll *,
+			int clk, struct nouveau_pll_vals *pv);
+	int (*pll_prog)(struct nouveau_clock *, u32 reg1,
+			struct nouveau_pll_vals *pv);
+};
+
+static inline struct nouveau_clock *
+nouveau_clock(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
+}
+
+#define nouveau_clock_create(p,e,o,d)                                          \
+	nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d)
+#define nouveau_clock_destroy(p)                                               \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_clock_init(p)                                                  \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_clock_fini(p,s)                                                \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int  nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, void *, u32, int, void **);
+
+#define _nouveau_clock_dtor _nouveau_subdev_dtor
+#define _nouveau_clock_init _nouveau_subdev_init
+#define _nouveau_clock_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_clock_oclass;
+extern struct nouveau_oclass nv40_clock_oclass;
+extern struct nouveau_oclass nv50_clock_oclass;
+extern struct nouveau_oclass nva3_clock_oclass;
+extern struct nouveau_oclass nvc0_clock_oclass;
+
+int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
+int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
+			int clk, struct nouveau_pll_vals *);
+int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
+			struct nouveau_pll_vals *);
+
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/device.h b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
new file mode 100644
index 000000000000..c9e4c4afa50e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
@@ -0,0 +1,24 @@
+#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
+#define __NOUVEAU_SUBDEV_DEVICE_H__
+
+#include <core/device.h>
+
+#define nouveau_device_create(p,n,s,c,d,u)                                     \
+	nouveau_device_create_((p), (n), (s), (c), (d), sizeof(**u), (void **)u)
+
+int  nouveau_device_create_(struct pci_dev *, u64 name, const char *sname,
+			    const char *cfg, const char *dbg, int, void **);
+
+int nv04_identify(struct nouveau_device *);
+int nv10_identify(struct nouveau_device *);
+int nv20_identify(struct nouveau_device *);
+int nv30_identify(struct nouveau_device *);
+int nv40_identify(struct nouveau_device *);
+int nv50_identify(struct nouveau_device *);
+int nvc0_identify(struct nouveau_device *);
+int nve0_identify(struct nouveau_device *);
+
+extern struct nouveau_oclass nouveau_device_sclass[];
+struct nouveau_device *nouveau_device_find(u64 name);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
new file mode 100644
index 000000000000..29e4cc1f6cc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -0,0 +1,40 @@
+#ifndef __NOUVEAU_DEVINIT_H__
+#define __NOUVEAU_DEVINIT_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_devinit {
+	struct nouveau_subdev base;
+	bool post;
+	void (*meminit)(struct nouveau_devinit *);
+};
+
+static inline struct nouveau_devinit *
+nouveau_devinit(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
+}
+
+#define nouveau_devinit_create(p,e,o,d)                                        \
+	nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_devinit_destroy(p)                                             \
+	nouveau_subdev_destroy(&(p)->base)
+
+int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, void **);
+int nouveau_devinit_init(struct nouveau_devinit *);
+int nouveau_devinit_fini(struct nouveau_devinit *, bool suspend);
+
+extern struct nouveau_oclass nv04_devinit_oclass;
+extern struct nouveau_oclass nv05_devinit_oclass;
+extern struct nouveau_oclass nv10_devinit_oclass;
+extern struct nouveau_oclass nv1a_devinit_oclass;
+extern struct nouveau_oclass nv20_devinit_oclass;
+extern struct nouveau_oclass nv50_devinit_oclass;
+
+void nv04_devinit_dtor(struct nouveau_object *);
+int  nv04_devinit_init(struct nouveau_object *);
+int  nv04_devinit_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
new file mode 100644
index 000000000000..5c1b5e1904f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -0,0 +1,134 @@
+#ifndef __NOUVEAU_FB_H__
+#define __NOUVEAU_FB_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/mm.h>
+
+#include <subdev/vm.h>
+
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO  1
+#define NV_MEM_ACCESS_WO  2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM  8
+#define NV_MEM_ACCESS_NOSNOOP 16
+
+#define NV_MEM_TARGET_VRAM        0
+#define NV_MEM_TARGET_PCI         1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM          3
+#define NV_MEM_TARGET_GART        4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+struct nouveau_mem {
+	struct drm_device *dev;
+
+	struct nouveau_vma bar_vma;
+	struct nouveau_vma vma[2];
+	u8  page_shift;
+
+	struct nouveau_mm_node *tag;
+	struct list_head regions;
+	dma_addr_t *pages;
+	u32 memtype;
+	u64 offset;
+	u64 size;
+	struct sg_table *sg;
+};
+
+struct nouveau_fb_tile {
+	struct nouveau_mm_node *tag;
+	u32 addr;
+	u32 limit;
+	u32 pitch;
+	u32 zcomp;
+};
+
+struct nouveau_fb {
+	struct nouveau_subdev base;
+
+	bool (*memtype_valid)(struct nouveau_fb *, u32 memtype);
+
+	struct {
+		enum {
+			NV_MEM_TYPE_UNKNOWN = 0,
+			NV_MEM_TYPE_STOLEN,
+			NV_MEM_TYPE_SGRAM,
+			NV_MEM_TYPE_SDRAM,
+			NV_MEM_TYPE_DDR1,
+			NV_MEM_TYPE_DDR2,
+			NV_MEM_TYPE_DDR3,
+			NV_MEM_TYPE_GDDR2,
+			NV_MEM_TYPE_GDDR3,
+			NV_MEM_TYPE_GDDR4,
+			NV_MEM_TYPE_GDDR5
+		} type;
+		u64 stolen;
+		u64 size;
+		int ranks;
+
+		int  (*get)(struct nouveau_fb *, u64 size, u32 align,
+			    u32 size_nc, u32 type, struct nouveau_mem **);
+		void (*put)(struct nouveau_fb *, struct nouveau_mem **);
+	} ram;
+
+	struct nouveau_mm vram;
+	struct nouveau_mm tags;
+
+	struct {
+		struct nouveau_fb_tile region[16];
+		int regions;
+		void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
+			     u32 pitch, u32 flags, struct nouveau_fb_tile *);
+		void (*fini)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+		void (*prog)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+	} tile;
+};
+
+static inline struct nouveau_fb *
+nouveau_fb(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
+}
+
+#define nouveau_fb_create(p,e,c,d)                                             \
+	nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
+int  nouveau_fb_created(struct nouveau_fb *);
+void nouveau_fb_destroy(struct nouveau_fb *);
+int  nouveau_fb_init(struct nouveau_fb *);
+#define nouveau_fb_fini(p,s)                                                   \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+void _nouveau_fb_dtor(struct nouveau_object *);
+int  _nouveau_fb_init(struct nouveau_object *);
+#define _nouveau_fb_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_fb_oclass;
+extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv50_fb_oclass;
+extern struct nouveau_oclass nvc0_fb_oclass;
+
+struct nouveau_bios;
+int  nouveau_fb_bios_memtype(struct nouveau_bios *);
+
+bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+
+void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
+void nv50_fb_trap(struct nouveau_fb *, int display);
+
+#endif
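
For orientation, a minimal sketch (not part of the patch) of how the ram.get()/ram.put() hooks declared above might be driven by a caller holding a struct nouveau_fb; the size, alignment, size_nc and memtype values here are placeholders.

static int
example_vram_alloc(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
	int ret;

	/* 1MiB, 4KiB-aligned, contiguous, memtype 0 (placeholder values) */
	ret = pfb->ram.get(pfb, 0x100000, 0x1000, 0, 0x000, pmem);
	if (ret)
		return ret;

	/* ... program (*pmem)->offset into whatever needs it ... */

	pfb->ram.put(pfb, pmem);
	return 0;
}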
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
new file mode 100644
index 000000000000..9ea2b12cc15d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -0,0 +1,64 @@
+#ifndef __NOUVEAU_GPIO_H__
+#define __NOUVEAU_GPIO_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
+struct nouveau_gpio {
+	struct nouveau_subdev base;
+
+	/* hardware interfaces */
+	void (*reset)(struct nouveau_gpio *);
+	int  (*drive)(struct nouveau_gpio *, int line, int dir, int out);
+	int  (*sense)(struct nouveau_gpio *, int line);
+	void (*irq_enable)(struct nouveau_gpio *, int line, bool);
+
+	/* software interfaces */
+	int  (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
+		     struct dcb_gpio_func *);
+	int  (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
+	int  (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
+	int  (*irq)(struct nouveau_gpio *, int idx, u8 tag, u8 line, bool on);
+
+	/* interrupt handling */
+	struct list_head isr;
+	spinlock_t lock;
+
+	void (*isr_run)(struct nouveau_gpio *, int idx, u32 mask);
+	int  (*isr_add)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
+			void (*)(void *, int state), void *data);
+	void (*isr_del)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
+			void (*)(void *, int state), void *data);
+};
+
+static inline struct nouveau_gpio *
+nouveau_gpio(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
+}
+
+#define nouveau_gpio_create(p,e,o,d)                                           \
+	nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_gpio_destroy(p)                                                \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_gpio_fini(p,s)                                                 \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, int, void **);
+int nouveau_gpio_init(struct nouveau_gpio *);
+
+extern struct nouveau_oclass nv10_gpio_oclass;
+extern struct nouveau_oclass nv50_gpio_oclass;
+extern struct nouveau_oclass nvd0_gpio_oclass;
+
+void nv50_gpio_dtor(struct nouveau_object *);
+int  nv50_gpio_init(struct nouveau_object *);
+int  nv50_gpio_fini(struct nouveau_object *, bool);
+void nv50_gpio_intr(struct nouveau_subdev *);
+void nv50_gpio_irq_enable(struct nouveau_gpio *, int line, bool);
+
+#endif
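
An illustrative caller of the software interface above (not part of the patch); the use of 0xff as a "match any line" wildcard when looking up by DCB tag is an assumption, not spelled out in this header.

static int
example_gpio_toggle(struct nouveau_gpio *gpio, u8 tag)
{
	int state = gpio->get(gpio, 0, tag, 0xff);
	if (state < 0)
		return state;
	return gpio->set(gpio, 0, tag, 0xff, !state);
}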
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
new file mode 100644
index 000000000000..b93ab01e3785
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -0,0 +1,60 @@
+#ifndef __NOUVEAU_I2C_H__
+#define __NOUVEAU_I2C_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/i2c.h>
+
+#define NV_I2C_PORT(n)    (0x00 + (n))
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
+
+struct nouveau_i2c_port {
+	struct i2c_adapter adapter;
+	struct nouveau_i2c *i2c;
+	struct i2c_algo_bit_data bit;
+	struct list_head head;
+	u8  index;
+	u8  type;
+	u32 dcb;
+	u32 drive;
+	u32 sense;
+	u32 state;
+};
+
+struct nouveau_i2c {
+	struct nouveau_subdev base;
+
+	struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
+	int (*identify)(struct nouveau_i2c *, int index,
+			const char *what, struct i2c_board_info *,
+			bool (*match)(struct nouveau_i2c_port *,
+				      struct i2c_board_info *));
+	struct list_head ports;
+};
+
+static inline struct nouveau_i2c *
+nouveau_i2c(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
+}
+
+extern struct nouveau_oclass nouveau_i2c_oclass;
+
+void nouveau_i2c_drive_scl(void *, int);
+void nouveau_i2c_drive_sda(void *, int);
+int  nouveau_i2c_sense_scl(void *);
+int  nouveau_i2c_sense_sda(void *);
+
+int  nv_rdi2cr(struct nouveau_i2c_port *, u8 addr, u8 reg);
+int  nv_wri2cr(struct nouveau_i2c_port *, u8 addr, u8 reg, u8 val);
+bool nv_probe_i2c(struct nouveau_i2c_port *, u8 addr);
+
+int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+
+extern const struct i2c_algorithm nouveau_i2c_bit_algo;
+extern const struct i2c_algorithm nouveau_i2c_aux_algo;
+
+#endif
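
A sketch (not part of the patch) showing how the find() hook and the nv_rdi2cr()/nv_wri2cr() helpers pair up for a read-modify-write on an i2c slave; the port index and error handling are illustrative only.

static int
example_i2c_rmw(struct nouveau_i2c *i2c, int port, u8 addr, u8 reg, u8 bits)
{
	struct nouveau_i2c_port *chan = i2c->find(i2c, NV_I2C_PORT(port));
	int val;

	if (!chan)
		return -ENODEV;

	val = nv_rdi2cr(chan, addr, reg);
	if (val < 0)
		return val;

	return nv_wri2cr(chan, addr, reg, val | bits);
}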
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
new file mode 100644
index 000000000000..88814f159d89
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
@@ -0,0 +1,34 @@
+#ifndef __NOUVEAU_IBUS_H__
+#define __NOUVEAU_IBUS_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_ibus {
+	struct nouveau_subdev base;
+};
+
+static inline struct nouveau_ibus *
+nouveau_ibus(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_IBUS];
+}
+
+#define nouveau_ibus_create(p,e,o,d)                                           \
+	nouveau_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus",              \
+			       sizeof(**d), (void **)d)
+#define nouveau_ibus_destroy(p)                                                \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_ibus_init(p)                                                   \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_ibus_fini(p,s)                                                 \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_ibus_dtor _nouveau_subdev_dtor
+#define _nouveau_ibus_init _nouveau_subdev_init
+#define _nouveau_ibus_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nvc0_ibus_oclass;
+extern struct nouveau_oclass nve0_ibus_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
new file mode 100644
index 000000000000..ec7a54e91a08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -0,0 +1,73 @@
+#ifndef __NOUVEAU_INSTMEM_H__
+#define __NOUVEAU_INSTMEM_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/mm.h>
+
+struct nouveau_instobj {
+	struct nouveau_object base;
+	struct list_head head;
+	u32 *suspend;
+	u64 addr;
+	u32 size;
+};
+
+static inline struct nouveau_instobj *
+nv_memobj(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
+		nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_instobj_create(p,e,o,d)                                        \
+	nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instobj_init(p)                                                \
+	nouveau_object_init(&(p)->base)
+#define nouveau_instobj_fini(p,s)                                              \
+	nouveau_object_fini(&(p)->base, (s))
+
+int  nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
+			     struct nouveau_oclass *, int, void **);
+void nouveau_instobj_destroy(struct nouveau_instobj *);
+
+void _nouveau_instobj_dtor(struct nouveau_object *);
+#define _nouveau_instobj_init nouveau_object_init
+#define _nouveau_instobj_fini nouveau_object_fini
+
+struct nouveau_instmem {
+	struct nouveau_subdev base;
+	struct list_head list;
+
+	u32 reserved;
+	int (*alloc)(struct nouveau_instmem *, struct nouveau_object *,
+		     u32 size, u32 align, struct nouveau_object **);
+};
+
+static inline struct nouveau_instmem *
+nouveau_instmem(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
+}
+
+#define nouveau_instmem_create(p,e,o,d)                                        \
+	nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instmem_destroy(p)                                             \
+	nouveau_subdev_destroy(&(p)->base)
+int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, void **);
+int nouveau_instmem_init(struct nouveau_instmem *);
+int nouveau_instmem_fini(struct nouveau_instmem *, bool);
+
+#define _nouveau_instmem_dtor _nouveau_subdev_dtor
+int _nouveau_instmem_init(struct nouveau_object *);
+int _nouveau_instmem_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nv04_instmem_oclass;
+extern struct nouveau_oclass nv40_instmem_oclass;
+extern struct nouveau_oclass nv50_instmem_oclass;
+
+#endif
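
The alloc() hook is the whole client-facing surface of this subdev; a minimal sketch (not part of the patch), with size and alignment chosen purely for illustration.

static int
example_instobj_new(struct nouveau_instmem *imem, struct nouveau_object *parent,
		    struct nouveau_object **pobj)
{
	/* 4KiB object, 256-byte aligned (placeholder values) */
	return imem->alloc(imem, parent, 0x1000, 0x100, pobj);
}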
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
new file mode 100644
index 000000000000..f351f63bc654
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -0,0 +1,33 @@
+#ifndef __NOUVEAU_LTCG_H__
+#define __NOUVEAU_LTCG_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_ltcg {
+	struct nouveau_subdev base;
+};
+
+static inline struct nouveau_ltcg *
+nouveau_ltcg(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG];
+}
+
+#define nouveau_ltcg_create(p,e,o,d)                                           \
+	nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2",            \
+			       sizeof(**d), (void **)d)
+#define nouveau_ltcg_destroy(p)                                                \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_ltcg_init(p)                                                   \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_ltcg_fini(p,s)                                                 \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_ltcg_dtor _nouveau_subdev_dtor
+#define _nouveau_ltcg_init _nouveau_subdev_init
+#define _nouveau_ltcg_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nvc0_ltcg_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
new file mode 100644
index 000000000000..fded97cea500
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -0,0 +1,49 @@
+#ifndef __NOUVEAU_MC_H__
+#define __NOUVEAU_MC_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_mc_intr {
+	u32 stat;
+	u32 unit;
+};
+
+struct nouveau_mc {
+	struct nouveau_subdev base;
+	const struct nouveau_mc_intr *intr_map;
+};
+
+static inline struct nouveau_mc *
+nouveau_mc(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
+}
+
+#define nouveau_mc_create(p,e,o,d)                                             \
+	nouveau_subdev_create_((p), (e), (o), 0, "PMC", "master",              \
+			       sizeof(**d), (void **)d)
+#define nouveau_mc_destroy(p)                                                  \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_mc_init(p)                                                     \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_mc_fini(p,s)                                                   \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_mc_dtor _nouveau_subdev_dtor
+#define _nouveau_mc_init _nouveau_subdev_init
+#define _nouveau_mc_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_mc_oclass;
+extern struct nouveau_oclass nv44_mc_oclass;
+extern struct nouveau_oclass nv50_mc_oclass;
+extern struct nouveau_oclass nv98_mc_oclass;
+extern struct nouveau_oclass nvc0_mc_oclass;
+
+void nouveau_mc_intr(struct nouveau_subdev *);
+
+extern const struct nouveau_mc_intr nv04_mc_intr[];
+int nv04_mc_init(struct nouveau_object *);
+int nv50_mc_init(struct nouveau_object *);
+
+#endif
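
A hypothetical consumer of the intr_map table (not part of the patch), assuming the table is zero-terminated and that the PMC interrupt status register sits at 0x000100; both details are editorial assumptions rather than something this hunk states.

static void
example_mc_dispatch(struct nouveau_mc *pmc)
{
	const struct nouveau_mc_intr *map = pmc->intr_map;
	u32 stat = nv_rd32(pmc, 0x000100);	/* assumed PMC_INTR offset */

	while (map->stat) {
		if (stat & map->stat)
			nv_info(pmc, "intr: unit 0x%08x\n", map->unit);
		map++;
	}
}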
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
new file mode 100644
index 000000000000..b93b152cb566
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
@@ -0,0 +1,37 @@
+#ifndef __NOUVEAU_MXM_H__
+#define __NOUVEAU_MXM_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#define MXM_SANITISE_DCB 0x00000001
+
+struct nouveau_mxm {
+	struct nouveau_subdev base;
+	u32 action;
+	u8 *mxms;
+};
+
+static inline struct nouveau_mxm *
+nouveau_mxm(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MXM];
+}
+
+#define nouveau_mxm_create(p,e,o,d)                                            \
+	nouveau_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mxm_init(p)                                                    \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_mxm_fini(p,s)                                                  \
+	nouveau_subdev_fini(&(p)->base, (s))
+int  nouveau_mxm_create_(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, int, void **);
+void nouveau_mxm_destroy(struct nouveau_mxm *);
+
+#define _nouveau_mxm_dtor _nouveau_subdev_dtor
+#define _nouveau_mxm_init _nouveau_subdev_init
+#define _nouveau_mxm_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv50_mxm_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
new file mode 100644
index 000000000000..faee569fd458
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -0,0 +1,58 @@
+#ifndef __NOUVEAU_THERM_H__
+#define __NOUVEAU_THERM_H__
+
+#include <core/device.h>
+#include <core/subdev.h>
+
+enum nouveau_therm_fan_mode {
+	FAN_CONTROL_NONE = 0,
+	FAN_CONTROL_MANUAL = 1,
+	FAN_CONTROL_NR,
+};
+
+enum nouveau_therm_attr_type {
+	NOUVEAU_THERM_ATTR_FAN_MIN_DUTY = 0,
+	NOUVEAU_THERM_ATTR_FAN_MAX_DUTY = 1,
+	NOUVEAU_THERM_ATTR_FAN_MODE = 2,
+
+	NOUVEAU_THERM_ATTR_THRS_FAN_BOOST = 10,
+	NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST = 11,
+	NOUVEAU_THERM_ATTR_THRS_DOWN_CLK = 12,
+	NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST = 13,
+	NOUVEAU_THERM_ATTR_THRS_CRITICAL = 14,
+	NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST = 15,
+	NOUVEAU_THERM_ATTR_THRS_SHUTDOWN = 16,
+	NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
+};
+
+struct nouveau_therm {
+	struct nouveau_subdev base;
+
+	int (*fan_get)(struct nouveau_therm *);
+	int (*fan_set)(struct nouveau_therm *, int);
+	int (*fan_sense)(struct nouveau_therm *);
+
+	int (*temp_get)(struct nouveau_therm *);
+
+	int (*attr_get)(struct nouveau_therm *, enum nouveau_therm_attr_type);
+	int (*attr_set)(struct nouveau_therm *,
+			enum nouveau_therm_attr_type, int);
+};
+
+static inline struct nouveau_therm *
+nouveau_therm(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_THERM];
+}
+
+#define nouveau_therm_create(p,e,o,d)                                          \
+	nouveau_subdev_create((p), (e), (o), 0, "THERM", "therm", d)
+#define nouveau_therm_destroy(p)                                               \
+	nouveau_subdev_destroy(&(p)->base)
+
+#define _nouveau_therm_dtor _nouveau_subdev_dtor
+
+extern struct nouveau_oclass nv40_therm_oclass;
+extern struct nouveau_oclass nv50_therm_oclass;
+
+#endif
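
An illustrative caller of the attr/fan hooks above (not part of the patch), assuming fan_set() takes a duty-cycle percentage.

static int
example_fan_manual(struct nouveau_therm *therm, int percent)
{
	int ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE,
				  FAN_CONTROL_MANUAL);
	if (ret)
		return ret;

	return therm->fan_set(therm, percent);
}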
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
new file mode 100644
index 000000000000..49bff901544c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -0,0 +1,53 @@
+#ifndef __NOUVEAU_TIMER_H__
+#define __NOUVEAU_TIMER_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_alarm {
+	struct list_head head;
+	u64 timestamp;
+	void (*func)(struct nouveau_alarm *);
+};
+
+bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
+bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
+bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
+void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
+
+#define NV_WAIT_DEFAULT 2000000000ULL
+#define nv_wait(o,a,m,v)                                                       \
+	nouveau_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
+#define nv_wait_ne(o,a,m,v)                                                    \
+	nouveau_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
+#define nv_wait_cb(o,c,d)                                                      \
+	nouveau_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
+
+struct nouveau_timer {
+	struct nouveau_subdev base;
+	u64  (*read)(struct nouveau_timer *);
+	void (*alarm)(struct nouveau_timer *, u32 time, struct nouveau_alarm *);
+};
+
+static inline struct nouveau_timer *
+nouveau_timer(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_TIMER];
+}
+
+#define nouveau_timer_create(p,e,o,d)                                          \
+	nouveau_subdev_create_((p), (e), (o), 0, "PTIMER", "timer",            \
+			       sizeof(**d), (void **)d)
+#define nouveau_timer_destroy(p)                                               \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_timer_init(p)                                                  \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_timer_fini(p,s)                                                \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_timer_create_(struct nouveau_object *, struct nouveau_engine *,
+			  struct nouveau_oclass *, int size, void **);
+
+extern struct nouveau_oclass nv04_timer_oclass;
+
+#endif
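
The nv_wait() wrappers above mirror how the new BAR code later in this patch polls for flush completion; a sketch with placeholder register and mask values, not part of the patch.

static void
example_wait_idle(struct nouveau_subdev *subdev)
{
	/* poll up to NV_WAIT_DEFAULT (2s) for bit 4 of a status reg to clear */
	if (!nv_wait(subdev, 0x002504, 0x00000010, 0x00000000))
		nv_warn(subdev, "idle timeout\n");
}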
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vga.h b/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
new file mode 100644
index 000000000000..fee09ad818e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
@@ -0,0 +1,30 @@
+#ifndef __NOUVEAU_VGA_H__
+#define __NOUVEAU_VGA_H__
+
+#include <core/os.h>
+
+/* access to various legacy io ports */
+u8   nv_rdport(void *obj, int head, u16 port);
+void nv_wrport(void *obj, int head, u16 port, u8 value);
+
+/* VGA Sequencer */
+u8   nv_rdvgas(void *obj, int head, u8 index);
+void nv_wrvgas(void *obj, int head, u8 index, u8 value);
+
+/* VGA Graphics */
+u8   nv_rdvgag(void *obj, int head, u8 index);
+void nv_wrvgag(void *obj, int head, u8 index, u8 value);
+
+/* VGA CRTC */
+u8   nv_rdvgac(void *obj, int head, u8 index);
+void nv_wrvgac(void *obj, int head, u8 index, u8 value);
+
+/* VGA indexed port access dispatcher */
+u8   nv_rdvgai(void *obj, int head, u16 port, u8 index);
+void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
+
+bool nv_lockvgac(void *obj, bool lock);
+u8   nv_rdvgaowner(void *obj);
+void nv_wrvgaowner(void *obj, u8);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index 3cdf6001d635..9d595efe667a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -25,10 +25,10 @@
 #ifndef __NOUVEAU_VM_H__
 #define __NOUVEAU_VM_H__
 
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
+#include <core/object.h>
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/mm.h>
 
 struct nouveau_vm_pgt {
 	struct nouveau_gpuobj *obj[2];
@@ -40,6 +40,9 @@ struct nouveau_vm_pgd {
 	struct nouveau_gpuobj *obj;
 };
 
+struct nouveau_gpuobj;
+struct nouveau_mem;
+
 struct nouveau_vma {
 	struct list_head head;
 	int refcount;
@@ -50,21 +53,30 @@ struct nouveau_vma {
 };
 
 struct nouveau_vm {
-	struct drm_device *dev;
+	struct nouveau_vmmgr *vmm;
 	struct nouveau_mm mm;
 	int refcount;
 
 	struct list_head pgd_list;
-	atomic_t engref[16];
+	atomic_t engref[64]; /* NVDEV_SUBDEV_NR */

 
 	struct nouveau_vm_pgt *pgt;
 	u32 fpde;
 	u32 lpde;
+};
+
+struct nouveau_vmmgr {
+	struct nouveau_subdev base;
 
+	u64 limit;
+	u8  dma_bits;
 	u32 pgt_bits;
 	u8  spg_shift;
 	u8  lpg_shift;
 
+	int  (*create)(struct nouveau_vmmgr *, u64 offset, u64 length,
+		       u64 mm_offset, struct nouveau_vm **);
+
 	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
 			struct nouveau_gpuobj *pgt[2]);
 	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
@@ -72,16 +84,47 @@ struct nouveau_vm {
 		    u64 phys, u64 delta);
 	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
 		       struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
-
-	void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
-			     struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
 	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
 	void (*flush)(struct nouveau_vm *);
 };
 
-/* nouveau_vm.c */
-int  nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+static inline struct nouveau_vmmgr *
+nouveau_vmmgr(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM];
+}
+
+#define nouveau_vmmgr_create(p,e,o,i,f,d)                                      \
+	nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d))
+#define nouveau_vmmgr_destroy(p)                                               \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_vmmgr_init(p)                                                  \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_vmmgr_fini(p,s)                                                \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor
+#define _nouveau_vmmgr_init _nouveau_subdev_init
+#define _nouveau_vmmgr_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_vmmgr_oclass;
+extern struct nouveau_oclass nv41_vmmgr_oclass;
+extern struct nouveau_oclass nv44_vmmgr_oclass;
+extern struct nouveau_oclass nv50_vmmgr_oclass;
+extern struct nouveau_oclass nvc0_vmmgr_oclass;
+
+int  nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
 		    struct nouveau_vm **);
+void nv04_vmmgr_dtor(struct nouveau_object *);
+
+void nv50_vm_flush_engine(struct nouveau_subdev *, int engine);
+void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type);
+
+/* nouveau_vm.c */
+int  nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
+		       u64 mm_offset, u32 block, struct nouveau_vm **);
+int  nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length,
+		    u64 mm_offset, struct nouveau_vm **);
 int  nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
 		    struct nouveau_gpuobj *pgd);
 int  nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
@@ -94,26 +137,6 @@ void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
 void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
 		       struct nouveau_mem *);
 void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
-			     struct nouveau_mem *mem);
-/* nv50_vm.c */
-void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
-		     struct nouveau_gpuobj *pgt[2]);
-void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
-		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
-void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
-		    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
-void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
-void nv50_vm_flush(struct nouveau_vm *);
-void nv50_vm_flush_engine(struct drm_device *, int engine);
-
-/* nvc0_vm.c */
-void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
-		     struct nouveau_gpuobj *pgt[2]);
-void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
-		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
-void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
-		    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
-void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
-void nvc0_vm_flush(struct nouveau_vm *);
+		     struct nouveau_mem *mem);
 
 #endif
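
The typical map/unmap lifecycle, modelled on the BAR kmap()/unmap() paths added later in this patch (sketch only, not part of the patch); the RW access flag and the use of the memory object's own page shift are the common case, not a requirement.

static int
example_vm_map(struct nouveau_vm *vm, struct nouveau_mem *mem,
	       struct nouveau_vma *vma)
{
	int ret;

	ret = nouveau_vm_get(vm, mem->size << 12, mem->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);

	/* ... later, tear the mapping down again ... */
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	return 0;
}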
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
new file mode 100644
index 000000000000..cfe3b9cad156
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -0,0 +1,47 @@
+#ifndef __NOUVEAU_OS_H__
+#define __NOUVEAU_OS_H__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/bitops.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/delay.h>
+#include <linux/io-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+
+#include <asm/unaligned.h>
+
+static inline int
+ffsll(u64 mask)
+{
+	int i;
+	for (i = 0; i < 64; i++) {
+		if (mask & (1ULL << i))
+			return i + 1;
+	}
+	return 0;
+}
+
+#ifndef ioread32_native
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native  ioread32be
+#define iowrite32_native iowrite32be
+#else /* def __BIG_ENDIAN */
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native  ioread32
+#define iowrite32_native iowrite32
+#endif /* def __BIG_ENDIAN else */
+#endif /* !ioread32_native */
+
+#endif
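
The local ffsll() above returns the 1-based index of the least significant set bit, or 0 when no bit is set, matching the usual libc helper of the same name; a trivial illustration, not part of the patch.

static int
example_lowest_unit(u64 units)
{
	/* e.g. ffsll(0x10) == 5, so this returns 4; -1 when no bit is set */
	return units ? ffsll(units) - 1 : -1;
}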
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
new file mode 100644
index 000000000000..cd01c533007a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <subdev/bar.h>
+
+struct nouveau_barobj {
+	struct nouveau_object base;
+	struct nouveau_vma vma;
+	void __iomem *iomem;
+};
+
+static int
+nouveau_barobj_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *mem, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = (void *)engine;
+	struct nouveau_barobj *barobj;
+	int ret;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &barobj);
+	*pobject = nv_object(barobj);
+	if (ret)
+		return ret;
+
+	ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
+	if (ret)
+		return ret;
+
+	barobj->iomem = bar->iomem + (u32)barobj->vma.offset;
+	return 0;
+}
+
+static void
+nouveau_barobj_dtor(struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = (void *)object->engine;
+	struct nouveau_barobj *barobj = (void *)object;
+	if (barobj->vma.node)
+		bar->unmap(bar, &barobj->vma);
+	nouveau_object_destroy(&barobj->base);
+}
+
+static u32
+nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+{
+	struct nouveau_barobj *barobj = (void *)object;
+	return ioread32_native(barobj->iomem + addr);
+}
+
+static void
+nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	struct nouveau_barobj *barobj = (void *)object;
+	iowrite32_native(data, barobj->iomem + addr);
+}
+
+static struct nouveau_oclass
+nouveau_barobj_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nouveau_barobj_ctor,
+		.dtor = nouveau_barobj_dtor,
+		.init = nouveau_object_init,
+		.fini = nouveau_object_fini,
+		.rd32 = nouveau_barobj_rd32,
+		.wr32 = nouveau_barobj_wr32,
+	},
+};
+
+int
+nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
+		  struct nouveau_mem *mem, struct nouveau_object **pobject)
+{
+	struct nouveau_object *engine = nv_object(bar);
+	return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
+				   mem, 0, pobject);
+}
+
+int
+nouveau_bar_create_(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_bar *bar;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
+				     "bar", length, pobject);
+	bar = *pobject;
+	if (ret)
+		return ret;
+
+	bar->iomem = ioremap(pci_resource_start(device->pdev, 3),
+			     pci_resource_len(device->pdev, 3));
+	return 0;
+}
+
+void
+nouveau_bar_destroy(struct nouveau_bar *bar)
+{
+	if (bar->iomem)
+		iounmap(bar->iomem);
+	nouveau_subdev_destroy(&bar->base);
+}
+
+void
+_nouveau_bar_dtor(struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = (void *)object;
+	nouveau_bar_destroy(bar);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
new file mode 100644
index 000000000000..c3acf5b70d9e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+struct nv50_bar_priv {
+	struct nouveau_bar base;
+	spinlock_t lock;
+	struct nouveau_gpuobj *mem;
+	struct nouveau_gpuobj *pad;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *bar1_vm;
+	struct nouveau_gpuobj *bar1;
+	struct nouveau_vm *bar3_vm;
+	struct nouveau_gpuobj *bar3;
+};
+
+static int
+nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
+	      u32 flags, struct nouveau_vma *vma)
+{
+	struct nv50_bar_priv *priv = (void *)bar;
+	int ret;
+
+	ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, mem);
+	nv50_vm_flush_engine(nv_subdev(bar), 6);
+	return 0;
+}
+
+static int
+nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
+	      u32 flags, struct nouveau_vma *vma)
+{
+	struct nv50_bar_priv *priv = (void *)bar;
+	int ret;
+
+	ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, mem);
+	nv50_vm_flush_engine(nv_subdev(bar), 6);
+	return 0;
+}
+
+static void
+nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
+{
+	nouveau_vm_unmap(vma);
+	nv50_vm_flush_engine(nv_subdev(bar), 6);
+	nouveau_vm_put(vma);
+}
+
+static void
+nv50_bar_flush(struct nouveau_bar *bar)
+{
+	struct nv50_bar_priv *priv = (void *)bar;
+	unsigned long flags;
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_wr32(priv, 0x00330c, 0x00000001);
+	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
+		nv_warn(priv, "flush timeout\n");
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void
+nv84_bar_flush(struct nouveau_bar *bar)
+{
+	struct nv50_bar_priv *priv = (void *)bar;
+	unsigned long flags;
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_wr32(bar, 0x070000, 0x00000001);
+	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
+		nv_warn(priv, "flush timeout\n");
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int
+nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_object *heap;
+	struct nouveau_vm *vm;
+	struct nv50_bar_priv *priv;
+	u64 start, limit;
+	int ret;
+
+	ret = nouveau_bar_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
+				&priv->mem);
+	heap = nv_object(priv->mem);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
+				 0x1400 : 0x0200, 0, 0, &priv->pad);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
+	if (ret)
+		return ret;
+
+	/* BAR3 */
+	start = 0x0100000000ULL;
+	limit = start + pci_resource_len(device->pdev, 3);
+
+	ret = nouveau_vm_new(device, start, limit, start, &vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
+				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
+				 &vm->pgt[0].obj[0]);
+	vm->pgt[0].refcount[0] = 1;
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
+	if (ret)
+		return ret;
+
+	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
+	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
+	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
+	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
+				  upper_32_bits(start));
+	nv_wo32(priv->bar3, 0x10, 0x00000000);
+	nv_wo32(priv->bar3, 0x14, 0x00000000);
+
+	/* BAR1 */
+	start = 0x0000000000ULL;
+	limit = start + pci_resource_len(device->pdev, 1);
+
+	ret = nouveau_vm_new(device, start, limit--, start, &vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
+	if (ret)
+		return ret;
+
+	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
+	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
+	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
+	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
+				  upper_32_bits(start));
+	nv_wo32(priv->bar1, 0x10, 0x00000000);
+	nv_wo32(priv->bar1, 0x14, 0x00000000);
+
+	priv->base.alloc = nouveau_bar_alloc;
+	priv->base.kmap = nv50_bar_kmap;
+	priv->base.umap = nv50_bar_umap;
+	priv->base.unmap = nv50_bar_unmap;
+	if (device->chipset == 0x50)
+		priv->base.flush = nv50_bar_flush;
+	else
+		priv->base.flush = nv84_bar_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static void
+nv50_bar_dtor(struct nouveau_object *object)
+{
+	struct nv50_bar_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->bar1);
+	nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar3);
+	if (priv->bar3_vm) {
+		nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
+		nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
+	}
+	nouveau_gpuobj_ref(NULL, &priv->pgd);
+	nouveau_gpuobj_ref(NULL, &priv->pad);
+	nouveau_gpuobj_ref(NULL, &priv->mem);
+	nouveau_bar_destroy(&priv->base);
+}
+
+static int
+nv50_bar_init(struct nouveau_object *object)
+{
+	struct nv50_bar_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bar_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	nv50_vm_flush_engine(nv_subdev(priv), 6);
+
+	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
+	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
+	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
+	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
+	return 0;
+}
+
+static int
+nv50_bar_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_bar_priv *priv = (void *)object;
+	return nouveau_bar_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv50_bar_oclass = {
+	.handle = NV_SUBDEV(BAR, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_bar_ctor,
+		.dtor = nv50_bar_dtor,
+		.init = nv50_bar_init,
+		.fini = nv50_bar_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
new file mode 100644
index 000000000000..77a6fb725d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+struct nvc0_bar_priv {
+	struct nouveau_bar base;
+	spinlock_t lock;
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_gpuobj *pgd;
+		struct nouveau_vm *vm;
+	} bar[2];
+};
+
+static int
+nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
+	      u32 flags, struct nouveau_vma *vma)
+{
+	struct nvc0_bar_priv *priv = (void *)bar;
+	int ret;
+
+	ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, mem);
+	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
+	return 0;
+}
+
+static int
+nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
+	      u32 flags, struct nouveau_vma *vma)
+{
+	struct nvc0_bar_priv *priv = (void *)bar;
+	int ret;
+
+	ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12,
+			     mem->page_shift, flags, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, mem);
+	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5);
+	return 0;
+}
+
+static void
+nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
+{
+	struct nvc0_bar_priv *priv = (void *)bar;
+	int i = !(vma->vm == priv->bar[0].vm);
+
+	nouveau_vm_unmap(vma);
+	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
+	nouveau_vm_put(vma);
+}
+
+static int
+nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct pci_dev *pdev = device->pdev;
+	struct nvc0_bar_priv *priv;
+	struct nouveau_gpuobj *mem;
+	struct nouveau_vm *vm;
+	int ret;
+
+	ret = nouveau_bar_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* BAR3 */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
+	mem = priv->bar[0].mem;
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL,
+				 (pci_resource_len(pdev, 3) >> 12) * 8,
+				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
+				 &vm->pgt[0].obj[0]);
+	vm->pgt[0].refcount[0] = 1;
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
+	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
+	nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
+	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
+
+	/* BAR1 */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
+	mem = priv->bar[1].mem;
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
+	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
+	nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
+	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));
+
+	priv->base.alloc = nouveau_bar_alloc;
+	priv->base.kmap = nvc0_bar_kmap;
+	priv->base.umap = nvc0_bar_umap;
+	priv->base.unmap = nvc0_bar_unmap;
+	priv->base.flush = nv84_bar_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static void
+nvc0_bar_dtor(struct nouveau_object *object)
+{
+	struct nvc0_bar_priv *priv = (void *)object;
+
+	nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);
+
+	if (priv->bar[0].vm) {
+		nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
+		nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
+	}
+	nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);
+
+	nouveau_bar_destroy(&priv->base);
+}
+
+static int
+nvc0_bar_init(struct nouveau_object *object)
+{
+	struct nvc0_bar_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bar_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
+
+	nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
+	nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_bar_oclass = {
+	.handle = NV_SUBDEV(BAR, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_bar_ctor,
+		.dtor = nvc0_bar_dtor,
+		.init = nvc0_bar_init,
+		.fini = _nouveau_bar_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
new file mode 100644
index 000000000000..2fbb6df697cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/device.h>
+#include <core/subdev.h>
+#include <core/option.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/bmp.h>
+#include <subdev/bios/bit.h>
+
+u8
+nvbios_checksum(const u8 *data, int size)
+{
+	u8 sum = 0;
+	while (size--)
+		sum += *data++;
+	return sum;
+}
+
+u16
+nvbios_findstr(const u8 *data, int size, const char *str, int len)
+{
+	int i, j;
+
+	for (i = 0; i <= (size - len); i++) {
+		for (j = 0; j < len; j++)
+			if ((char)data[i + j] != str[j])
+				break;
+		if (j == len)
+			return i;
+	}
+
+	return 0;
+}
+
+#if defined(__powerpc__)
+static void
+nouveau_bios_shadow_of(struct nouveau_bios *bios)
+{
+	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct device_node *dn;
+	const u32 *data;
+	int size;
+
+	dn = pci_device_to_OF_node(pdev);
+	if (!dn) {
+		nv_info(bios, "Unable to get the OF node\n");
+		return;
+	}
+
+	data = of_get_property(dn, "NVDA,BMP", &size);
+	if (data) {
+		bios->size = size;
+		bios->data = kmalloc(bios->size, GFP_KERNEL);
+		if (bios->data)
+			memcpy(bios->data, data, size);
+	}
+}
+#endif
+
+static void
+nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
+{
+	struct nouveau_device *device = nv_device(bios);
+	u32 bar0 = 0;
+	int i;
+
+	if (device->card_type >= NV_50) {
+		u64 addr = (u64)(nv_rd32(bios, 0x619f04) & 0xffffff00) << 8;
+		if (!addr) {
+			addr  = (u64)nv_rd32(bios, 0x001700) << 16;
+			addr += 0xf0000;
+		}
+
+		bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16);
+	}
+
+	/* bail if no rom signature */
+	if (nv_rd08(bios, 0x700000) != 0x55 ||
+	    nv_rd08(bios, 0x700001) != 0xaa)
+		goto out;
+
+	bios->size = nv_rd08(bios, 0x700002) * 512;
+	bios->data = kmalloc(bios->size, GFP_KERNEL);
+	if (bios->data) {
+		for (i = 0; i < bios->size; i++)
+			nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i));
+	}
+
+out:
+	if (device->card_type >= NV_50)
+		nv_wr32(bios, 0x001700, bar0);
+}
+
+static void
+nouveau_bios_shadow_prom(struct nouveau_bios *bios)
+{
+	struct nouveau_device *device = nv_device(bios);
+	u32 pcireg, access;
+	u16 pcir;
+	int i;
+
+	/* enable access to rom */
+	if (device->card_type >= NV_50)
+		pcireg = 0x088050;
+	else
+		pcireg = 0x001850;
+	access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
+
+	/* bail if no rom signature.  as a workaround for a PROM reading
+	 * issue on some chipsets, where the first read after a period of
+	 * inactivity returns the wrong result, retry the first header
+	 * byte a few times before giving up
+	 */
+	i = 16;
+	do {
+		if (nv_rd08(bios, 0x300000) == 0x55)
+			break;
+	} while (i--);
+
+	if (!i || nv_rd08(bios, 0x300001) != 0xaa)
+		goto out;
+
+	/* additional check (see note below) - read PCI record header */
+	pcir = nv_rd08(bios, 0x300018) |
+	       nv_rd08(bios, 0x300019) << 8;
+	if (nv_rd08(bios, 0x300000 + pcir) != 'P' ||
+	    nv_rd08(bios, 0x300001 + pcir) != 'C' ||
+	    nv_rd08(bios, 0x300002 + pcir) != 'I' ||
+	    nv_rd08(bios, 0x300003 + pcir) != 'R')
+		goto out;
+
+	/* read entire bios image to system memory */
+	bios->size = nv_rd08(bios, 0x300002) * 512;
+	bios->data = kmalloc(bios->size, GFP_KERNEL);
+	if (bios->data) {
+		for (i = 0; i < bios->size; i++)
+			nv_wo08(bios, i, nv_rd08(bios, 0x300000 + i));
+	}
+
+out:
+	/* disable access to rom */
+	nv_wr32(bios, pcireg, access);
+}
+
+#if defined(CONFIG_ACPI)
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+#else
+static inline bool
+nouveau_acpi_rom_supported(struct pci_dev *pdev) {
+	return false;
+}
+
+static inline int
+nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) {
+	return -EINVAL;
+}
+#endif
+
+static void
+nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
+{
+	struct pci_dev *pdev = nv_device(bios)->pdev;
+	int cnt = 65536 / 4096;
+	int ret;
+
+	if (!nouveau_acpi_rom_supported(pdev))
+		return;
+
+	bios->data = kmalloc(65536, GFP_KERNEL);
+	bios->size = 0;
+	if (!bios->data)
+		return;
+
+	while (cnt--) {
+		ret = nouveau_acpi_get_bios_chunk(bios->data, bios->size, 4096);
+		if (ret != 4096)
+			return;
+
+		bios->size += 4096;
+	}
+}
+
+static void
+nouveau_bios_shadow_pci(struct nouveau_bios *bios)
+{
+	struct pci_dev *pdev = nv_device(bios)->pdev;
+	size_t size;
+
+	if (!pci_enable_rom(pdev)) {
+		void __iomem *rom = pci_map_rom(pdev, &size);
+		if (rom && size) {
+			bios->data = kmalloc(size, GFP_KERNEL);
+			if (bios->data) {
+				memcpy_fromio(bios->data, rom, size);
+				bios->size = size;
+			}
+		}
+		if (rom)
+			pci_unmap_rom(pdev, rom);
+
+		pci_disable_rom(pdev);
+	}
+}
+
+static int
+nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
+{
+	if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
+		nv_info(bios, "... signature not found\n");
+		return 0;
+	}
+
+	if (nvbios_checksum(bios->data, bios->data[2] * 512)) {
+		nv_info(bios, "... checksum invalid\n");
+		/* if a ro image is somewhat bad, it's probably all rubbish */
+		return writeable ? 2 : 1;
+	}
+
+	nv_info(bios, "... appears to be valid\n");
+	return 3;
+}
+
+struct methods {
+	const char desc[16];
+	void (*shadow)(struct nouveau_bios *);
+	const bool rw;
+	int score;
+	u32 size;
+	u8 *data;
+};
+
+static int
+nouveau_bios_shadow(struct nouveau_bios *bios)
+{
+	struct methods shadow_methods[] = {
+#if defined(__powerpc__)
+		{ "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL },
+#endif
+		{ "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL },
+		{ "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
+		{ "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
+		{ "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
+		{}
+	};
+	struct methods *mthd, *best;
+	const struct firmware *fw;
+	const char *optarg;
+	int optlen, ret;
+	char *source;
+
+	optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+	source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
+	if (source) {
+		/* try to match one of the built-in methods */
+		mthd = shadow_methods;
+		do {
+			if (strcasecmp(source, mthd->desc))
+				continue;
+			nv_info(bios, "source: %s\n", mthd->desc);
+
+			mthd->shadow(bios);
+			mthd->score = nouveau_bios_score(bios, mthd->rw);
+			if (mthd->score) {
+				kfree(source);
+				return 0;
+			}
+		} while ((++mthd)->shadow);
+
+		/* attempt to load firmware image */
+		ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev);
+		if (ret == 0) {
+			bios->size = fw->size;
+			bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+			release_firmware(fw);
+
+			nv_info(bios, "image: %s\n", source);
+			if (nouveau_bios_score(bios, 1)) {
+				kfree(source);
+				return 0;
+			}
+
+			kfree(bios->data);
+			bios->data = NULL;
+		}
+
+		nv_error(bios, "source \'%s\' invalid\n", source);
+		kfree(source);
+	}
+
+	mthd = shadow_methods;
+	do {
+		nv_info(bios, "checking %s for image...\n", mthd->desc);
+		mthd->shadow(bios);
+		mthd->score = nouveau_bios_score(bios, mthd->rw);
+		mthd->size = bios->size;
+		mthd->data = bios->data;
+		bios->data = NULL;
+	} while (mthd->score != 3 && (++mthd)->shadow);
+
+	mthd = shadow_methods;
+	best = mthd;
+	do {
+		if (mthd->score > best->score) {
+			kfree(best->data);
+			best = mthd;
+		}
+	} while ((++mthd)->shadow);
+
+	if (best->score) {
+		nv_info(bios, "using image from %s\n", best->desc);
+		bios->size = best->size;
+		bios->data = best->data;
+		return 0;
+	}
+
+	nv_error(bios, "unable to locate usable image\n");
+	return -EINVAL;
+}
+
+static u8
+nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return bios->data[addr];
+}
+
+static u16
+nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return get_unaligned_le16(&bios->data[addr]);
+}
+
+static u32
+nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return get_unaligned_le32(&bios->data[addr]);
+}
+
+static void
+nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+{
+	struct nouveau_bios *bios = (void *)object;
+	bios->data[addr] = data;
+}
+
+static void
+nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+{
+	struct nouveau_bios *bios = (void *)object;
+	put_unaligned_le16(data, &bios->data[addr]);
+}
+
+static void
+nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	struct nouveau_bios *bios = (void *)object;
+	put_unaligned_le32(data, &bios->data[addr]);
+}
+
+static int
+nouveau_bios_ctor(struct nouveau_object *parent,
+		  struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nouveau_bios *bios;
+	struct bit_entry bit_i;
+	int ret;
+
+	ret = nouveau_subdev_create(parent, engine, oclass, 0,
+				    "VBIOS", "bios", &bios);
+	*pobject = nv_object(bios);
+	if (ret)
+		return ret;
+
+	ret = nouveau_bios_shadow(bios);
+	if (ret)
+		return ret;
+
+	/* detect type of vbios we're dealing with */
+	bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
+					  "\xff\x7f""NV\0", 5);
+	if (bios->bmp_offset) {
+		nv_info(bios, "BMP version %x.%x\n",
+			bmp_version(bios) >> 8,
+			bmp_version(bios) & 0xff);
+	}
+
+	bios->bit_offset = nvbios_findstr(bios->data, bios->size,
+					  "\xff\xb8""BIT", 5);
+	if (bios->bit_offset)
+		nv_info(bios, "BIT signature found\n");
+
+	/* determine the vbios version number */
+	if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
+		bios->version.major = nv_ro08(bios, bit_i.offset + 3);
+		bios->version.chip  = nv_ro08(bios, bit_i.offset + 2);
+		bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
+		bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
+	} else
+	if (bmp_version(bios)) {
+		bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
+		bios->version.chip  = nv_ro08(bios, bios->bmp_offset + 12);
+		bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11);
+		bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
+	}
+
+	nv_info(bios, "version %02x.%02x.%02x.%02x\n",
+		bios->version.major, bios->version.chip,
+		bios->version.minor, bios->version.micro);
+
+	return 0;
+}
+
+static void
+nouveau_bios_dtor(struct nouveau_object *object)
+{
+	struct nouveau_bios *bios = (void *)object;
+	kfree(bios->data);
+	nouveau_subdev_destroy(&bios->base);
+}
+
+static int
+nouveau_bios_init(struct nouveau_object *object)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return nouveau_subdev_init(&bios->base);
+}
+
+static int
+nouveau_bios_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return nouveau_subdev_fini(&bios->base, suspend);
+}
+
+struct nouveau_oclass
+nouveau_bios_oclass = {
+	.handle = NV_SUBDEV(VBIOS, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nouveau_bios_ctor,
+		.dtor = nouveau_bios_dtor,
+		.init = nouveau_bios_init,
+		.fini = nouveau_bios_fini,
+		.rd08 = nouveau_bios_rd08,
+		.rd16 = nouveau_bios_rd16,
+		.rd32 = nouveau_bios_rd32,
+		.wr08 = nouveau_bios_wr08,
+		.wr16 = nouveau_bios_wr16,
+		.wr32 = nouveau_bios_wr32,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
new file mode 100644
index 000000000000..1d03a3f2b2d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "core/object.h"
+
+#include "subdev/bios.h"
+#include "subdev/bios/bit.h"
+
+int
+bit_entry(struct nouveau_bios *bios, u8 id, struct bit_entry *bit)
+{
+	if (likely(bios->bit_offset)) {
+		u8  entries = nv_ro08(bios, bios->bit_offset + 10);
+		u32 entry   = bios->bit_offset + 12;
+		while (entries--) {
+			if (nv_ro08(bios, entry + 0) == id) {
+				bit->id      = nv_ro08(bios, entry + 0);
+				bit->version = nv_ro08(bios, entry + 1);
+				bit->length  = nv_ro16(bios, entry + 2);
+				bit->offset  = nv_ro16(bios, entry + 4);
+				return 0;
+			}
+
+			entry += nv_ro08(bios, bios->bit_offset + 9);
+		}
+
+		return -ENOENT;
+	}
+
+	return -EINVAL;
+}
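
[Editorial usage sketch, not part of the patch: bit_entry() is the lookup used throughout the rest of this series -- callers pass a single-character table id and then read fields relative to bit->offset.  The helper name below is hypothetical; it mirrors the 'i'-table read done in nouveau_bios_ctor() above.]

	static int
	example_read_vbios_version(struct nouveau_bios *bios, u8 *major, u8 *minor)
	{
		struct bit_entry bit_i;

		/* the BIT 'i' table carries the VBIOS version bytes */
		if (bit_entry(bios, 'i', &bit_i) || bit_i.length < 4)
			return -ENOENT;

		*major = nv_ro08(bios, bit_i.offset + 3);
		*minor = nv_ro08(bios, bit_i.offset + 1);
		return 0;
	}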
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
new file mode 100644
index 000000000000..5ac010efd959
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/conn.h>
+
+u16
+dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
+	if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
+		u16 data = nv_ro16(bios, dcb + 0x14);
+		if (data) {
+			*ver = nv_ro08(bios, data + 0);
+			*hdr = nv_ro08(bios, data + 1);
+			*cnt = nv_ro08(bios, data + 2);
+			*len = nv_ro08(bios, data + 3);
+			return data;
+		}
+	}
+	return 0x0000;
+}
+
+u16
+dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 data = dcb_conntab(bios, ver, &hdr, &cnt, len);
+	if (data && idx < cnt)
+		return data + hdr + (idx * *len);
+	return 0x0000;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
new file mode 100644
index 000000000000..9ed6e728a94c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "core/device.h"
+
+#include "subdev/bios.h"
+#include "subdev/bios/dcb.h"
+
+u16
+dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct nouveau_device *device = nv_device(bios);
+	u16 dcb = 0x0000;
+
+	if (device->card_type > NV_04)
+		dcb = nv_ro16(bios, 0x36);
+	if (!dcb) {
+		nv_warn(bios, "DCB table not found\n");
+		return dcb;
+	}
+
+	*ver = nv_ro08(bios, dcb);
+
+	if (*ver >= 0x41) {
+		nv_warn(bios, "DCB *ver 0x%02x unknown\n", *ver);
+		return 0x0000;
+	} else
+	if (*ver >= 0x30) {
+		if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) {
+			*hdr = nv_ro08(bios, dcb + 1);
+			*cnt = nv_ro08(bios, dcb + 2);
+			*len = nv_ro08(bios, dcb + 3);
+			return dcb;
+		}
+	} else
+	if (*ver >= 0x20) {
+		if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) {
+			u16 i2c = nv_ro16(bios, dcb + 2);
+			*hdr = 8;
+			*cnt = (i2c - dcb) / 8;
+			*len = 8;
+			return dcb;
+		}
+	} else
+	if (*ver >= 0x15) {
+		if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) {
+			u16 i2c = nv_ro16(bios, dcb + 2);
+			*hdr = 4;
+			*cnt = (i2c - dcb) / 10;
+			*len = 10;
+			return dcb;
+		}
+	} else {
+		/*
+		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+		 * always has the same single (crt) entry, even when tv-out
+		 * present, so the conclusion is this version cannot really
+		 * be used.
+		 *
+		 * v1.2 tables (some NV6/10, and NV15+) normally have the
+		 * same 5 entries, which are not specific to the card and so
+		 * no use.
+		 *
+		 * v1.2 does have an I2C table that read_dcb_i2c_table can
+		 * handle, but cards exist (nv11 in #14821) with a bad i2c
+		 * table pointer, so use the indices parsed in
+		 * parse_bmp_structure.
+		 *
+		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+		 */
+		nv_warn(bios, "DCB contains no useful data\n");
+		return 0x0000;
+	}
+
+	nv_warn(bios, "DCB header validation failed\n");
+	return 0x0000;
+}
+
+u16
+dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 dcb = dcb_table(bios, ver, &hdr, &cnt, len);
+	if (dcb && idx < cnt)
+		return dcb + hdr + (idx * *len);
+	return 0x0000;
+}
+
+int
+dcb_outp_foreach(struct nouveau_bios *bios, void *data,
+		 int (*exec)(struct nouveau_bios *, void *, int, u16))
+{
+	int ret, idx = -1;
+	u8  ver, len;
+	u16 outp;
+
+	while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
+		if (nv_ro32(bios, outp) == 0x00000000)
+			break; /* seen on an NV11 with DCB v1.5 */
+		if (nv_ro32(bios, outp) == 0xffffffff)
+			break; /* seen on an NV17 with DCB v2.0 */
+
+		if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
+			continue;
+		if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
+			break;
+
+		ret = exec(bios, data, idx, outp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
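
[Editorial usage sketch, not part of the patch: dcb_outp_foreach() drives most DCB consumers -- the callback receives the entry index and an offset into the image, and a non-zero return aborts the walk.  Both names below are hypothetical.]

	static int
	example_count_outp(struct nouveau_bios *bios, void *data, int idx, u16 outp)
	{
		int *count = data;
		(*count)++;
		return 0;	/* non-zero would abort the walk */
	}

	static int
	example_dcb_outp_count(struct nouveau_bios *bios)
	{
		int count = 0;
		dcb_outp_foreach(bios, &count, example_count_outp);
		return count;
	}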
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
new file mode 100644
index 000000000000..3cbc0f3e8d5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+
+#include "subdev/bios.h"
+#include "subdev/bios/bit.h"
+#include "subdev/bios/dcb.h"
+#include "subdev/bios/dp.h"
+
+u16
+dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_d;
+
+	if (!bit_entry(bios, 'd', &bit_d)) {
+		if (bit_d.version == 1) {
+			u16 data = nv_ro16(bios, bit_d.offset);
+			if (data) {
+				*ver = nv_ro08(bios, data + 0);
+				*hdr = nv_ro08(bios, data + 1);
+				*len = nv_ro08(bios, data + 2);
+				*cnt = nv_ro08(bios, data + 3);
+				return data;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 table = dp_table(bios, ver, &hdr, &cnt, len);
+	if (table && idx < cnt)
+		return nv_ro16(bios, table + hdr + (idx * *len));
+	return 0xffff;
+}
+
+u16
+dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
+	      u8 *ver, u8 *len)
+{
+	u8  idx = 0;
+	u16 data;
+	while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
+		if (data) {
+			u32 hash = nv_ro32(bios, data);
+			if (dcb_hash_match(outp, hash))
+				return data;
+		}
+	}
+	return 0x0000;
+}
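
[Editorial usage sketch, not part of the patch: a caller that only needs to know whether a DisplayPort table is present, and how many output entries it carries, can use dp_table() directly.  The helper name below is hypothetical.]

	static void
	example_log_dp_table(struct nouveau_bios *bios)
	{
		u8 ver, hdr, cnt, len;

		if (dp_table(bios, &ver, &hdr, &cnt, &len))
			nv_info(bios, "DP table version 0x%02x, %d outputs\n", ver, cnt);
	}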
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
new file mode 100644
index 000000000000..5afb568b2d69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/extdev.h>
+
+static u16
+extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
+{
+	u8  dcb_ver, dcb_hdr, dcb_cnt, dcb_len;
+	u16 dcb, extdev = 0;
+
+	dcb = dcb_table(bios, &dcb_ver, &dcb_hdr, &dcb_cnt, &dcb_len);
+	if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
+		return 0x0000;
+
+	extdev = nv_ro16(bios, dcb + 18);
+	if (!extdev)
+		return 0x0000;
+
+	*ver = nv_ro08(bios, extdev + 0);
+	*hdr = nv_ro08(bios, extdev + 1);
+	*cnt = nv_ro08(bios, extdev + 2);
+	*len = nv_ro08(bios, extdev + 3);
+
+	return extdev + *hdr;
+}
+
+u16
+nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8 hdr, cnt;
+	u16 extdev = extdev_table(bios, ver, &hdr, len, &cnt);
+	if (extdev && idx < cnt)
+		return extdev + idx * *len;
+	return 0x0000;
+}
+
+static void
+extdev_parse_entry(struct nouveau_bios *bios, u16 offset,
+			  struct nvbios_extdev_func *entry)
+{
+	entry->type = nv_ro08(bios, offset + 0);
+	entry->addr = nv_ro08(bios, offset + 1);
+	entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1;
+}
+
+int
+nvbios_extdev_parse(struct nouveau_bios *bios, int idx,
+		    struct nvbios_extdev_func *func)
+{
+	u8 ver, len;
+	u16 entry;
+
+	if (!(entry = nvbios_extdev_entry(bios, idx, &ver, &len)))
+		return -EINVAL;
+
+	extdev_parse_entry(bios, entry, func);
+
+	return 0;
+}
+
+int
+nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
+		   struct nvbios_extdev_func *func)
+{
+	u8 ver, len, i;
+	u16 entry;
+
+	i = 0;
+	while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
+		extdev_parse_entry(bios, entry, func);
+		if (func->type == type)
+			return 0;
+	}
+
+	return -EINVAL;
+}
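
[Editorial usage sketch, not part of the patch: nvbios_extdev_find() walks the table until the requested type matches, so it is the natural entry point for probing a particular external device.  The helper name below is hypothetical; the type values come from subdev/bios/extdev.h.]

	static void
	example_report_extdev(struct nouveau_bios *bios, enum nvbios_extdev_type type)
	{
		struct nvbios_extdev_func func;

		if (!nvbios_extdev_find(bios, type, &func))
			nv_info(bios, "external device 0x%02x at i2c addr 0x%02x (bus %d)\n",
				func.type, func.addr, func.bus);
	}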
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
new file mode 100644
index 000000000000..4c9f1e508165
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/gpio.h>
+
+u16
+dcb_gpio_table(struct nouveau_bios *bios)
+{
+	u8  ver, hdr, cnt, len;
+	u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
+	if (dcb) {
+		if (ver >= 0x30 && hdr >= 0x0c)
+			return nv_ro16(bios, dcb + 0x0a);
+		if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
+			return nv_ro16(bios, dcb - 0x0f);
+	}
+	return 0x0000;
+}
+
+u16
+dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver)
+{
+	u16 gpio = dcb_gpio_table(bios);
+	if (gpio) {
+		*ver = nv_ro08(bios, gpio);
+		if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2))
+			return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
+		else if (ent < nv_ro08(bios, gpio + 2))
+			return gpio + nv_ro08(bios, gpio + 1) +
+			       (ent * nv_ro08(bios, gpio + 3));
+	}
+	return 0x0000;
+}
+
+int
+dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+	       struct dcb_gpio_func *gpio)
+{
+	u8  ver, hdr, cnt, len;
+	u16 entry;
+	int i = -1;
+
+	while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
+		if (ver < 0x40) {
+			u16 data = nv_ro16(bios, entry);
+			*gpio = (struct dcb_gpio_func) {
+				.line = (data & 0x001f) >> 0,
+				.func = (data & 0x07e0) >> 5,
+				.log[0] = (data & 0x1800) >> 11,
+				.log[1] = (data & 0x6000) >> 13,
+				.param = !!(data & 0x8000),
+			};
+		} else
+		if (ver < 0x41) {
+			u32 data = nv_ro32(bios, entry);
+			*gpio = (struct dcb_gpio_func) {
+				.line = (data & 0x0000001f) >> 0,
+				.func = (data & 0x0000ff00) >> 8,
+				.log[0] = (data & 0x18000000) >> 27,
+				.log[1] = (data & 0x60000000) >> 29,
+				.param = !!(data & 0x80000000),
+			};
+		} else {
+			u32 data = nv_ro32(bios, entry + 0);
+			u8 data1 = nv_ro32(bios, entry + 4);
+			*gpio = (struct dcb_gpio_func) {
+				.line = (data & 0x0000003f) >> 0,
+				.func = (data & 0x0000ff00) >> 8,
+				.log[0] = (data1 & 0x30) >> 4,
+				.log[1] = (data1 & 0xc0) >> 6,
+				.param = !!(data & 0x80000000),
+			};
+		}
+
+		if ((line == 0xff || line == gpio->line) &&
+		    (func == 0xff || func == gpio->func))
+			return 0;
+	}
+
+	/* DCB 2.2, fixed TVDAC GPIO data */
+	if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
+		if (func == DCB_GPIO_TVDAC0) {
+			u8 conf = nv_ro08(bios, entry - 5);
+			u8 addr = nv_ro08(bios, entry - 4);
+			if (conf & 0x01) {
+				*gpio = (struct dcb_gpio_func) {
+					.func = DCB_GPIO_TVDAC0,
+					.line = addr >> 4,
+					.log[0] = !!(conf & 0x02),
+					.log[1] =  !(conf & 0x02),
+				};
+				return 0;
+			}
+		}
+	}
+
+	return -EINVAL;
+}
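
[Editorial usage sketch, not part of the patch: dcb_gpio_parse() treats 0xff as a wildcard for both the function and line arguments, so fetching the first entry implementing a given function is a one-liner.  The helper name below is hypothetical.]

	static int
	example_find_gpio_func(struct nouveau_bios *bios, u8 func,
			       struct dcb_gpio_func *gpio)
	{
		/* 0xff for "line" matches any line; idx is not consumed by
		 * dcb_gpio_entry() as written above
		 */
		return dcb_gpio_parse(bios, 0, func, 0xff, gpio);
	}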
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
new file mode 100644
index 000000000000..ad577db83766
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+
+#include "subdev/bios.h"
+#include "subdev/bios/dcb.h"
+#include "subdev/bios/i2c.h"
+
+u16
+dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u16 i2c = 0x0000;
+	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
+	if (dcb) {
+		if (*ver >= 0x15)
+			i2c = nv_ro16(bios, dcb + 2);
+		if (*ver >= 0x30)
+			i2c = nv_ro16(bios, dcb + 4);
+	}
+
+	if (i2c && *ver >= 0x30) {
+		*ver = nv_ro08(bios, i2c + 0);
+		*hdr = nv_ro08(bios, i2c + 1);
+		*cnt = nv_ro08(bios, i2c + 2);
+		*len = nv_ro08(bios, i2c + 3);
+	} else {
+		/* pre-3.0 i2c tables have no header; *ver stays the DCB version */
+		*hdr = 0;
+		*cnt = 16;
+		*len = 4;
+	}
+
+	return i2c;
+}
+
+u16
+dcb_i2c_entry(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 i2c = dcb_i2c_table(bios, ver, &hdr, &cnt, len);
+	if (i2c && idx < cnt)
+		return i2c + hdr + (idx * *len);
+	return 0x0000;
+}
+
+int
+dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
+{
+	u8  ver, len;
+	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
+	if (ent) {
+		info->data = nv_ro32(bios, ent + 0);
+		info->type = nv_ro08(bios, ent + 3);
+		if (ver < 0x30) {
+			info->type &= 0x07;
+			if (info->type == 0x07)
+				info->type = 0xff;
+		}
+
+		switch (info->type) {
+		case DCB_I2C_NV04_BIT:
+			info->drive = nv_ro08(bios, ent + 0);
+			info->sense = nv_ro08(bios, ent + 1);
+			return 0;
+		case DCB_I2C_NV4E_BIT:
+			info->drive = nv_ro08(bios, ent + 1);
+			return 0;
+		case DCB_I2C_NVIO_BIT:
+		case DCB_I2C_NVIO_AUX:
+			info->drive = nv_ro08(bios, ent + 0);
+			return 0;
+		case DCB_I2C_UNUSED:
+			return 0;
+		default:
+			nv_warn(bios, "unknown i2c type %d\n", info->type);
+			info->type = DCB_I2C_UNUSED;
+			return 0;
+		}
+	}
+
+	if (bios->bmp_offset && idx < 2) {
+		/* BMP from v4.0 has the i2c info in the structure; it's at a
+		 * fixed location on earlier VBIOS
+		 */
+		if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
+			ent = 0x0048;
+		else
+			ent = 0x0036 + bios->bmp_offset;
+
+		if (idx == 0) {
+			info->drive = nv_ro08(bios, ent + 4);
+			if (!info->drive) info->drive = 0x3f;
+			info->sense = nv_ro08(bios, ent + 5);
+			if (!info->sense) info->sense = 0x3e;
+		} else
+		if (idx == 1) {
+			info->drive = nv_ro08(bios, ent + 6);
+			if (!info->drive) info->drive = 0x37;
+			info->sense = nv_ro08(bios, ent + 7);
+			if (!info->sense) info->sense = 0x36;
+		}
+
+		info->type = DCB_I2C_NV04_BIT;
+		return 0;
+	}
+
+	return -ENOENT;
+}
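
[Editorial usage sketch, not part of the patch: dcb_i2c_parse() is indexed by the per-output i2c_index stored in the DCB, which is how a consumer resolves the bus for a given output (the init-script helper init_i2c() in init.c below passes the same index to the i2c subdev).  The helper name below is hypothetical.]

	static int
	example_i2c_for_output(struct nouveau_bios *bios, struct dcb_output *outp,
			       struct dcb_i2c_entry *info)
	{
		/* outp->i2c_index selects this output's entry in the DCB I2C table */
		return dcb_i2c_parse(bios, outp->i2c_index, info);
	}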
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
new file mode 100644
index 000000000000..6be8c32f6e4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -0,0 +1,2120 @@
+#include <core/engine.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+#include <subdev/bios/bmp.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/devinit.h>
+#include <subdev/clock.h>
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+#include <subdev/gpio.h>
+
+#define bioslog(lvl, fmt, args...) do {                                        \
+	nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset,            \
+		  init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args);   \
+} while(0)
+#define cont(fmt, args...) do {                                                \
+	if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE)                      \
+		printk(fmt, ##args);                                           \
+} while(0)
+#define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
+#define warn(fmt, args...) bioslog(WARN, fmt, ##args)
+#define error(fmt, args...) bioslog(ERROR, fmt, ##args)
+
+/******************************************************************************
+ * init parser control flow helpers
+ *****************************************************************************/
+
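+/* init->execute is a small flag word: the script body runs while it is
+ * exactly 0x01; condition opcodes set bit 0x02 to suppress execution, and
+ * bit 0x04 (see init_exec_force) temporarily overrides that suppression.
+ */
+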
+static inline bool
+init_exec(struct nvbios_init *init)
+{
+	return (init->execute == 1) || ((init->execute & 5) == 5);
+}
+
+static inline void
+init_exec_set(struct nvbios_init *init, bool exec)
+{
+	if (exec) init->execute &= 0xfd;
+	else      init->execute |= 0x02;
+}
+
+static inline void
+init_exec_inv(struct nvbios_init *init)
+{
+	init->execute ^= 0x02;
+}
+
+static inline void
+init_exec_force(struct nvbios_init *init, bool exec)
+{
+	if (exec) init->execute |= 0x04;
+	else      init->execute &= 0xfb;
+}
+
+/******************************************************************************
+ * init parser wrappers for normal register/i2c/whatever accessors
+ *****************************************************************************/
+
+static inline int
+init_or(struct nvbios_init *init)
+{
+	if (init->outp)
+		return ffs(init->outp->or) - 1;
+	error("script needs OR!!\n");
+	return 0;
+}
+
+static inline int
+init_link(struct nvbios_init *init)
+{
+	if (init->outp)
+		return !(init->outp->sorconf.link & 1);
+	error("script needs OR link\n");
+	return 0;
+}
+
+static inline int
+init_crtc(struct nvbios_init *init)
+{
+	if (init->crtc >= 0)
+		return init->crtc;
+	error("script needs crtc\n");
+	return 0;
+}
+
+static u8
+init_conn(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+
+	if (init->outp) {
+		u8  ver, len;
+		u16 conn = dcb_conn(bios, init->outp->connector, &ver, &len);
+		if (conn)
+			return nv_ro08(bios, conn);
+	}
+
+	error("script needs connector type\n");
+	return 0x00;
+}
+
+static inline u32
+init_nvreg(struct nvbios_init *init, u32 reg)
+{
+	/* C51 (at least) sometimes has the lower bits set which the VBIOS
+	 * interprets to mean that access needs to go through certain IO
+	 * ports instead.  The NVIDIA binary driver has been seen to access
+	 * these through the NV register address, so lets assume we can
+	 * do the same
+	 */
+	reg &= ~0x00000003;
+
+	/* GF8+ display scripts need register addresses mangled a bit to
+	 * select a specific CRTC/OR
+	 */
+	if (nv_device(init->bios)->card_type >= NV_50) {
+		if (reg & 0x80000000) {
+			reg += init_crtc(init) * 0x800;
+			reg &= ~0x80000000;
+		}
+
+		if (reg & 0x40000000) {
+			reg += init_or(init) * 0x800;
+			reg &= ~0x40000000;
+			if (reg & 0x20000000) {
+				reg += init_link(init) * 0x80;
+				reg &= ~0x20000000;
+			}
+		}
+	}
+
+	if (reg & ~0x00fffffc)
+		warn("unknown bits in register 0x%08x\n", reg);
+	return reg;
+}
+
+static u32
+init_rd32(struct nvbios_init *init, u32 reg)
+{
+	reg = init_nvreg(init, reg);
+	if (init_exec(init))
+		return nv_rd32(init->subdev, reg);
+	return 0x00000000;
+}
+
+static void
+init_wr32(struct nvbios_init *init, u32 reg, u32 val)
+{
+	reg = init_nvreg(init, reg);
+	if (init_exec(init))
+		nv_wr32(init->subdev, reg, val);
+}
+
+static u32
+init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
+{
+	reg = init_nvreg(init, reg);
+	if (init_exec(init)) {
+		u32 tmp = nv_rd32(init->subdev, reg);
+		nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
+		return tmp;
+	}
+	return 0x00000000;
+}
+
+static u8
+init_rdport(struct nvbios_init *init, u16 port)
+{
+	if (init_exec(init))
+		return nv_rdport(init->subdev, init->crtc, port);
+	return 0x00;
+}
+
+static void
+init_wrport(struct nvbios_init *init, u16 port, u8 value)
+{
+	if (init_exec(init))
+		nv_wrport(init->subdev, init->crtc, port, value);
+}
+
+static u8
+init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
+{
+	struct nouveau_subdev *subdev = init->subdev;
+	if (init_exec(init)) {
+		int head = init->crtc < 0 ? 0 : init->crtc;
+		return nv_rdvgai(subdev, head, port, index);
+	}
+	return 0x00;
+}
+
+static void
+init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
+{
+	/* force head 0 for updates to cr44, it only exists on first head */
+	if (nv_device(init->subdev)->card_type < NV_50) {
+		if (port == 0x03d4 && index == 0x44)
+			init->crtc = 0;
+	}
+
+	if (init_exec(init)) {
+		int head = init->crtc < 0 ? 0 : init->crtc;
+		nv_wrvgai(init->subdev, head, port, index, value);
+	}
+
+	/* select head 1 if cr44 write selected it */
+	if (nv_device(init->subdev)->card_type < NV_50) {
+		if (port == 0x03d4 && index == 0x44 && value == 3)
+			init->crtc = 1;
+	}
+}
+
+static struct nouveau_i2c_port *
+init_i2c(struct nvbios_init *init, int index)
+{
+	struct nouveau_i2c *i2c = nouveau_i2c(init->bios);
+
+	if (index == 0xff) {
+		index = NV_I2C_DEFAULT(0);
+		if (init->outp && init->outp->i2c_upper_default)
+			index = NV_I2C_DEFAULT(1);
+	} else
+	if (index < 0) {
+		if (!init->outp) {
+			error("script needs output for i2c\n");
+			return NULL;
+		}
+
+		index = init->outp->i2c_index;
+	}
+
+	return i2c->find(i2c, index);
+}
+
+static int
+init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
+{
+	struct nouveau_i2c_port *port = init_i2c(init, index);
+	if (port && init_exec(init))
+		return nv_rdi2cr(port, addr, reg);
+	return -ENODEV;
+}
+
+static int
+init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
+{
+	struct nouveau_i2c_port *port = init_i2c(init, index);
+	if (port && init_exec(init))
+		return nv_wri2cr(port, addr, reg, val);
+	return -ENODEV;
+}
+
+static int
+init_rdauxr(struct nvbios_init *init, u32 addr)
+{
+	struct nouveau_i2c_port *port = init_i2c(init, -1);
+	u8 data;
+
+	if (port && init_exec(init)) {
+		int ret = nv_rdaux(port, addr, &data, 1);
+		if (ret)
+			return ret;
+		return data;
+	}
+
+	return -ENODEV;
+}
+
+static int
+init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
+{
+	struct nouveau_i2c_port *port = init_i2c(init, -1);
+	if (port && init_exec(init))
+		return nv_wraux(port, addr, &data, 1);
+	return -ENODEV;
+}
+
+static void
+init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
+{
+	struct nouveau_clock *clk = nouveau_clock(init->bios);
+	if (clk && clk->pll_set && init_exec(init)) {
+		int ret = clk->pll_set(clk, id, freq);
+		if (ret)
+			warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
+	}
+}
+
+/******************************************************************************
+ * parsing of bios structures that are required to execute init tables
+ *****************************************************************************/
+
+static u16
+init_table(struct nouveau_bios *bios, u16 *len)
+{
+	struct bit_entry bit_I;
+
+	if (!bit_entry(bios, 'I', &bit_I)) {
+		*len = bit_I.length;
+		return bit_I.offset;
+	}
+
+	if (bmp_version(bios) >= 0x0510) {
+		*len = 14;
+		return bios->bmp_offset + 75;
+	}
+
+	return 0x0000;
+}
+
+static u16
+init_table_(struct nvbios_init *init, u16 offset, const char *name)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 len, data = init_table(bios, &len);
+	if (data) {
+		if (len >= offset + 2) {
+			data = nv_ro16(bios, data + offset);
+			if (data)
+				return data;
+
+			warn("%s pointer invalid\n", name);
+			return 0x0000;
+		}
+
+		warn("init data too short for %s pointer", name);
+		return 0x0000;
+	}
+
+	warn("init data not found\n");
+	return 0x0000;
+}
+
+#define init_script_table(b) init_table_((b), 0x00, "script table")
+#define init_macro_index_table(b) init_table_((b), 0x02, "macro index table")
+#define init_macro_table(b) init_table_((b), 0x04, "macro table")
+#define init_condition_table(b) init_table_((b), 0x06, "condition table")
+#define init_io_condition_table(b) init_table_((b), 0x08, "io condition table")
+#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag condition table")
+#define init_function_table(b) init_table_((b), 0x0c, "function table")
+#define init_xlat_table(b) init_table_((b), 0x10, "xlat table")
+
+static u16
+init_script(struct nouveau_bios *bios, int index)
+{
+	struct nvbios_init init = { .bios = bios };
+	u16 data;
+
+	if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
+		if (index > 1)
+			return 0x0000;
+
+		data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
+		return nv_ro16(bios, data + (index * 2));
+	}
+
+	data = init_script_table(&init);
+	if (data)
+		return nv_ro16(bios, data + (index * 2));
+
+	return 0x0000;
+}
+
+static u16
+init_unknown_script(struct nouveau_bios *bios)
+{
+	u16 len, data = init_table(bios, &len);
+	if (data && len >= 16)
+		return nv_ro16(bios, data + 14);
+	return 0x0000;
+}
+
+static u16
+init_ram_restrict_table(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	struct bit_entry bit_M;
+	u16 data = 0x0000;
+
+	if (!bit_entry(bios, 'M', &bit_M)) {
+		if (bit_M.version == 1 && bit_M.length >= 5)
+			data = nv_ro16(bios, bit_M.offset + 3);
+		if (bit_M.version == 2 && bit_M.length >= 3)
+			data = nv_ro16(bios, bit_M.offset + 1);
+	}
+
+	if (data == 0x0000)
+		warn("ram restrict table not found\n");
+	return data;
+}
+
+static u8
+init_ram_restrict_group_count(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	struct bit_entry bit_M;
+
+	if (!bit_entry(bios, 'M', &bit_M)) {
+		if (bit_M.version == 1 && bit_M.length >= 5)
+			return nv_ro08(bios, bit_M.offset + 2);
+		if (bit_M.version == 2 && bit_M.length >= 3)
+			return nv_ro08(bios, bit_M.offset + 0);
+	}
+
+	return 0x00;
+}
+
+static u8
+init_ram_restrict(struct nvbios_init *init)
+{
+	u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2;
+	u16 table = init_ram_restrict_table(init);
+	if (table)
+		return nv_ro08(init->bios, table + strap);
+	return 0x00;
+}
+
+static u8
+init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 table = init_xlat_table(init);
+	if (table) {
+		u16 data = nv_ro16(bios, table + (index * 2));
+		if (data)
+			return nv_ro08(bios, data + offset);
+		warn("xlat table pointer %d invalid\n", index);
+	}
+	return 0x00;
+}
+
+/******************************************************************************
+ * utility functions used by various init opcode handlers
+ *****************************************************************************/
+
+static bool
+init_condition_met(struct nvbios_init *init, u8 cond)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 table = init_condition_table(init);
+	if (table) {
+		u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
+		u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
+		u32 val = nv_ro32(bios, table + (cond * 12) + 8);
+		trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
+		      cond, reg, msk, val);
+		return (init_rd32(init, reg) & msk) == val;
+	}
+	return false;
+}
+
+static bool
+init_io_condition_met(struct nvbios_init *init, u8 cond)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 table = init_io_condition_table(init);
+	if (table) {
+		u16 port = nv_ro16(bios, table + (cond * 5) + 0);
+		u8 index = nv_ro08(bios, table + (cond * 5) + 2);
+		u8  mask = nv_ro08(bios, table + (cond * 5) + 3);
+		u8 value = nv_ro08(bios, table + (cond * 5) + 4);
+		trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
+		      cond, port, index, mask, value);
+		return (init_rdvgai(init, port, index) & mask) == value;
+	}
+	return false;
+}
+
+static bool
+init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 table = init_io_flag_condition_table(init);
+	if (table) {
+		u16 port = nv_ro16(bios, table + (cond * 9) + 0);
+		u8 index = nv_ro08(bios, table + (cond * 9) + 2);
+		u8  mask = nv_ro08(bios, table + (cond * 9) + 3);
+		u8 shift = nv_ro08(bios, table + (cond * 9) + 4);
+		u16 data = nv_ro16(bios, table + (cond * 9) + 5);
+		u8 dmask = nv_ro08(bios, table + (cond * 9) + 7);
+		u8 value = nv_ro08(bios, table + (cond * 9) + 8);
+		u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
+		return (nv_ro08(bios, data + ioval) & dmask) == value;
+	}
+	return false;
+}
+
+static inline u32
+init_shift(u32 data, u8 shift)
+{
+	if (shift < 0x80)
+		return data >> shift;
+	return data << (0x100 - shift);
+}
+
+static u32
+init_tmds_reg(struct nvbios_init *init, u8 tmds)
+{
+	/* For mlv < 0x80, it is an index into a table of TMDS base addresses.
+	 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
+	 * CR58 for CR57 = 0 to index a table of offsets to the basic
+	 * 0x6808b0 address.
+	 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
+	 * CR58 for CR57 = 0 to index a table of offsets to the basic
+	 * 0x6808b0 address, and then flip the offset by 8.
+	 */
+
+	const int pramdac_offset[13] = {
+		0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
+	const u32 pramdac_table[4] = {
+		0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
+
+	if (tmds >= 0x80) {
+		if (init->outp) {
+			u32 dacoffset = pramdac_offset[init->outp->or];
+			if (tmds == 0x81)
+				dacoffset ^= 8;
+			return 0x6808b0 + dacoffset;
+		}
+
+		error("tmds opcodes need dcb\n");
+	} else {
+		if (tmds < ARRAY_SIZE(pramdac_table))
+			return pramdac_table[tmds];
+
+		error("tmds selector 0x%02x unknown\n", tmds);
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ * init opcode handlers
+ *****************************************************************************/
+
+/**
+ * init_reserved - stub for various unknown/unused single-byte opcodes
+ *
+ */
+static void
+init_reserved(struct nvbios_init *init)
+{
+	u8 opcode = nv_ro08(init->bios, init->offset);
+	trace("RESERVED\t0x%02x\n", opcode);
+	init->offset += 1;
+}
+
+/**
+ * INIT_DONE - opcode 0x71
+ *
+ */
+static void
+init_done(struct nvbios_init *init)
+{
+	trace("DONE\n");
+	init->offset = 0x0000;
+}
+
+/**
+ * INIT_IO_RESTRICT_PROG - opcode 0x32
+ *
+ */
+static void
+init_io_restrict_prog(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8 shift = nv_ro08(bios, init->offset + 5);
+	u8 count = nv_ro08(bios, init->offset + 6);
+	u32  reg = nv_ro32(bios, init->offset + 7);
+	u8 conf, i;
+
+	trace("IO_RESTRICT_PROG\tR[0x%06x] = "
+	      "((0x%04x[0x%02x] & 0x%02x) >> %d) [{\n",
+	      reg, port, index, mask, shift);
+	init->offset += 11;
+
+	conf = (init_rdvgai(init, port, index) & mask) >> shift;
+	for (i = 0; i < count; i++) {
+		u32 data = nv_ro32(bios, init->offset);
+
+		if (i == conf) {
+			trace("\t0x%08x *\n", data);
+			init_wr32(init, reg, data);
+		} else {
+			trace("\t0x%08x\n", data);
+		}
+
+		init->offset += 4;
+	}
+	trace("}]\n");
+}
+
+/**
+ * INIT_REPEAT - opcode 0x33
+ *
+ */
+static void
+init_repeat(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 count = nv_ro08(bios, init->offset + 1);
+	u16 repeat = init->repeat;
+
+	trace("REPEAT\t0x%02x\n", count);
+	init->offset += 2;
+
+	init->repeat = init->offset;
+	init->repend = init->offset;
+	while (count--) {
+		init->offset = init->repeat;
+		nvbios_exec(init);
+		if (count)
+			trace("REPEAT\t0x%02x\n", count);
+	}
+	init->offset = init->repend;
+	init->repeat = repeat;
+}
+
+/**
+ * INIT_IO_RESTRICT_PLL - opcode 0x34
+ *
+ */
+static void
+init_io_restrict_pll(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8 shift = nv_ro08(bios, init->offset + 5);
+	s8  iofc = nv_ro08(bios, init->offset + 6);
+	u8 count = nv_ro08(bios, init->offset + 7);
+	u32  reg = nv_ro32(bios, init->offset + 8);
+	u8 conf, i;
+
+	trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
+	      "((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) IOFCOND 0x%02x [{\n",
+	      reg, port, index, mask, shift, iofc);
+	init->offset += 12;
+
+	conf = (init_rdvgai(init, port, index) & mask) >> shift;
+	for (i = 0; i < count; i++) {
+		u32 freq = nv_ro16(bios, init->offset) * 10;
+
+		if (i == conf) {
+			trace("\t%dkHz *\n", freq);
+			if (iofc > 0 && init_io_flag_condition_met(init, iofc))
+				freq *= 2;
+			init_prog_pll(init, reg, freq);
+		} else {
+			trace("\t%dkHz\n", freq);
+		}
+
+		init->offset += 2;
+	}
+	trace("}]\n");
+}
+
+/**
+ * INIT_END_REPEAT - opcode 0x36
+ *
+ */
+static void
+init_end_repeat(struct nvbios_init *init)
+{
+	trace("END_REPEAT\n");
+	init->offset += 1;
+
+	if (init->repeat) {
+		init->repend = init->offset;
+		init->offset = 0;
+	}
+}
+
+/**
+ * INIT_COPY - opcode 0x37
+ *
+ */
+static void
+init_copy(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u8 shift = nv_ro08(bios, init->offset + 5);
+	u8 smask = nv_ro08(bios, init->offset + 6);
+	u16 port = nv_ro16(bios, init->offset + 7);
+	u8 index = nv_ro08(bios, init->offset + 9);
+	u8  mask = nv_ro08(bios, init->offset + 10);
+	u8  data;
+
+	trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
+	      "((R[0x%06x] %s 0x%02x) & 0x%02x)\n",
+	      port, index, mask, reg, (shift & 0x80) ? "<<" : ">>",
+	      (shift & 0x80) ? (0x100 - shift) : shift, smask);
+	init->offset += 11;
+
+	data  = init_rdvgai(init, port, index) & mask;
+	data |= init_shift(init_rd32(init, reg), shift) & smask;
+	init_wrvgai(init, port, index, data);
+}
+
+/**
+ * INIT_NOT - opcode 0x38
+ *
+ */
+static void
+init_not(struct nvbios_init *init)
+{
+	trace("NOT\n");
+	init->offset += 1;
+	init_exec_inv(init);
+}
+
+/**
+ * INIT_IO_FLAG_CONDITION - opcode 0x39
+ *
+ */
+static void
+init_io_flag_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 cond = nv_ro08(bios, init->offset + 1);
+
+	trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
+	init->offset += 2;
+
+	if (!init_io_flag_condition_met(init, cond))
+		init_exec_set(init, false);
+}
+
+/**
+ * INIT_DP_CONDITION - opcode 0x3a
+ *
+ */
+static void
+init_dp_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  cond = nv_ro08(bios, init->offset + 1);
+	u8  unkn = nv_ro08(bios, init->offset + 2);
+	u8  ver, len;
+	u16 data;
+
+	trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
+	init->offset += 3;
+
+	switch (cond) {
+	case 0:
+		if (init_conn(init) != DCB_CONNECTOR_eDP)
+			init_exec_set(init, false);
+		break;
+	case 1:
+	case 2:
+		if ( init->outp &&
+		    (data = dp_outp_match(bios, init->outp, &ver, &len))) {
+			if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
+				init_exec_set(init, false);
+			if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
+				init_exec_set(init, false);
+			break;
+		}
+
+		warn("script needs dp output table data\n");
+		break;
+	case 5:
+		if (!(init_rdauxr(init, 0x0d) & 1))
+			init_exec_set(init, false);
+		break;
+	default:
+		warn("unknown dp condition 0x%02x\n", cond);
+		break;
+	}
+}
+
+/**
+ * INIT_IO_MASK_OR - opcode 0x3b
+ *
+ */
+static void
+init_io_mask_or(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8    or = init_or(init);
+	u8  data;
+
+	trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)", index, or);
+	init->offset += 2;
+
+	data = init_rdvgai(init, 0x03d4, index);
+	init_wrvgai(init, 0x03d4, index, data &= ~(1 << or));
+}
+
+/**
+ * INIT_IO_OR - opcode 0x3c
+ *
+ */
+static void
+init_io_or(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8    or = init_or(init);
+	u8  data;
+
+	trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)", index, or);
+	init->offset += 2;
+
+	data = init_rdvgai(init, 0x03d4, index);
+	init_wrvgai(init, 0x03d4, index, data | (1 << or));
+}
+
+/**
+ * INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
+ *
+ */
+static void
+init_idx_addr_latched(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 creg = nv_ro32(bios, init->offset + 1);
+	u32 dreg = nv_ro32(bios, init->offset + 5);
+	u32 mask = nv_ro32(bios, init->offset + 9);
+	u32 data = nv_ro32(bios, init->offset + 13);
+	u8 count = nv_ro08(bios, init->offset + 17);
+
+	trace("INDEX_ADDRESS_LATCHED\t"
+	      "R[0x%06x] : R[0x%06x]\n\tCTRL &= 0x%08x |= 0x%08x\n",
+	      creg, dreg, mask, data);
+	init->offset += 18;
+
+	while (count--) {
+		u8 iaddr = nv_ro08(bios, init->offset + 0);
+		u8 idata = nv_ro08(bios, init->offset + 1);
+
+		trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
+		init->offset += 2;
+
+		init_wr32(init, dreg, idata);
+		init_mask(init, creg, ~mask, data | idata);
+	}
+}
+
+/**
+ * INIT_IO_RESTRICT_PLL2 - opcode 0x4a
+ *
+ */
+static void
+init_io_restrict_pll2(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8 shift = nv_ro08(bios, init->offset + 5);
+	u8 count = nv_ro08(bios, init->offset + 6);
+	u32  reg = nv_ro32(bios, init->offset + 7);
+	u8  conf, i;
+
+	trace("IO_RESTRICT_PLL2\t"
+	      "R[0x%06x] =PLL= ((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) [{\n",
+	      reg, port, index, mask, shift);
+	init->offset += 11;
+
+	conf = (init_rdvgai(init, port, index) & mask) >> shift;
+	for (i = 0; i < count; i++) {
+		u32 freq = nv_ro32(bios, init->offset);
+		if (i == conf) {
+			trace("\t%dkHz *\n", freq);
+			init_prog_pll(init, reg, freq);
+		} else {
+			trace("\t%dkHz\n", freq);
+		}
+		init->offset += 4;
+	}
+	trace("}]\n");
+}
+
+/**
+ * INIT_PLL2 - opcode 0x4b
+ *
+ */
+static void
+init_pll2(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u32 freq = nv_ro32(bios, init->offset + 5);
+
+	trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
+	init->offset += 9;
+
+	init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_I2C_BYTE - opcode 0x4c
+ *
+ */
+static void
+init_i2c_byte(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
+	u8 count = nv_ro08(bios, init->offset + 3);
+
+	trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
+	init->offset += 4;
+
+	while (count--) {
+		u8  reg = nv_ro08(bios, init->offset + 0);
+		u8 mask = nv_ro08(bios, init->offset + 1);
+		u8 data = nv_ro08(bios, init->offset + 2);
+		int val;
+
+		trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
+		init->offset += 3;
+
+		val = init_rdi2cr(init, index, addr, reg);
+		if (val < 0)
+			continue;
+		init_wri2cr(init, index, addr, reg, (val & mask) | data);
+	}
+}
+
+/**
+ * INIT_ZM_I2C_BYTE - opcode 0x4d
+ *
+ */
+static void
+init_zm_i2c_byte(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
+	u8 count = nv_ro08(bios, init->offset + 3);
+
+	trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
+	init->offset += 4;
+
+	while (count--) {
+		u8  reg = nv_ro08(bios, init->offset + 0);
+		u8 data = nv_ro08(bios, init->offset + 1);
+
+		trace("\t[0x%02x] = 0x%02x\n", reg, data);
+		init->offset += 2;
+
+		init_wri2cr(init, index, addr, reg, data);
+	}
+
+}
+
+/**
+ * INIT_ZM_I2C - opcode 0x4e
+ *
+ */
+static void
+init_zm_i2c(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
+	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 data[256], i;
+
+	trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
+	init->offset += 4;
+
+	for (i = 0; i < count; i++) {
+		data[i] = nv_ro08(bios, init->offset);
+		trace("\t0x%02x\n", data[i]);
+		init->offset++;
+	}
+
+	if (init_exec(init)) {
+		struct nouveau_i2c_port *port = init_i2c(init, index);
+		struct i2c_msg msg = {
+			.addr = addr, .flags = 0, .len = count, .buf = data,
+		};
+		int ret;
+
+		if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1)
+			warn("i2c wr failed, %d\n", ret);
+	}
+}
+
+/**
+ * INIT_TMDS - opcode 0x4f
+ *
+ */
+static void
+init_tmds(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 tmds = nv_ro08(bios, init->offset + 1);
+	u8 addr = nv_ro08(bios, init->offset + 2);
+	u8 mask = nv_ro08(bios, init->offset + 3);
+	u8 data = nv_ro08(bios, init->offset + 4);
+	u32 reg = init_tmds_reg(init, tmds);
+
+	trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
+	      tmds, addr, mask, data);
+	init->offset += 5;
+
+	if (reg == 0)
+		return;
+
+	init_wr32(init, reg + 0, addr | 0x00010000);
+	init_wr32(init, reg + 4, data | (init_rd32(init, reg + 4) & mask));
+	init_wr32(init, reg + 0, addr);
+}
+
+/**
+ * INIT_ZM_TMDS_GROUP - opcode 0x50
+ *
+ */
+static void
+init_zm_tmds_group(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  tmds = nv_ro08(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 2);
+	u32  reg = init_tmds_reg(init, tmds);
+
+	trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
+	init->offset += 3;
+
+	while (count--) {
+		u8 addr = nv_ro08(bios, init->offset + 0);
+		u8 data = nv_ro08(bios, init->offset + 1);
+
+		trace("\t[0x%02x] = 0x%02x\n", addr, data);
+		init->offset += 2;
+
+		init_wr32(init, reg + 4, data);
+		init_wr32(init, reg + 0, addr);
+	}
+}
+
+/**
+ * INIT_CR_INDEX_ADDRESS_LATCHED - opcode 0x51
+ *
+ */
+static void
+init_cr_idx_adr_latch(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 addr0 = nv_ro08(bios, init->offset + 1);
+	u8 addr1 = nv_ro08(bios, init->offset + 2);
+	u8  base = nv_ro08(bios, init->offset + 3);
+	u8 count = nv_ro08(bios, init->offset + 4);
+	u8 save0;
+
+	trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
+	init->offset += 5;
+
+	save0 = init_rdvgai(init, 0x03d4, addr0);
+	while (count--) {
+		u8 data = nv_ro08(bios, init->offset);
+
+		trace("\t\t[0x%02x] = 0x%02x\n", base, data);
+		init->offset += 1;
+
+		init_wrvgai(init, 0x03d4, addr0, base++);
+		init_wrvgai(init, 0x03d4, addr1, data);
+	}
+	init_wrvgai(init, 0x03d4, addr0, save0);
+}
+
+/**
+ * INIT_CR - opcode 0x52
+ *
+ */
+static void
+init_cr(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 addr = nv_ro08(bios, init->offset + 1);
+	u8 mask = nv_ro08(bios, init->offset + 2);
+	u8 data = nv_ro08(bios, init->offset + 3);
+	u8 val;
+
+	trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
+	init->offset += 4;
+
+	val = init_rdvgai(init, 0x03d4, addr) & mask;
+	init_wrvgai(init, 0x03d4, addr, val | data);
+}
+
+/**
+ * INIT_ZM_CR - opcode 0x53
+ *
+ */
+static void
+init_zm_cr(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 addr = nv_ro08(bios, init->offset + 1);
+	u8 data = nv_ro08(bios, init->offset + 2);
+
+	trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr,  data);
+	init->offset += 3;
+
+	init_wrvgai(init, 0x03d4, addr, data);
+}
+
+/**
+ * INIT_ZM_CR_GROUP - opcode 0x54
+ *
+ */
+static void
+init_zm_cr_group(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 count = nv_ro08(bios, init->offset + 1);
+
+	trace("ZM_CR_GROUP\n");
+	init->offset += 2;
+
+	while (count--) {
+		u8 addr = nv_ro08(bios, init->offset + 0);
+		u8 data = nv_ro08(bios, init->offset + 1);
+
+		trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
+		init->offset += 2;
+
+		init_wrvgai(init, 0x03d4, addr, data);
+	}
+}
+
+/**
+ * INIT_CONDITION_TIME - opcode 0x56
+ *
+ */
+static void
+init_condition_time(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  cond = nv_ro08(bios, init->offset + 1);
+	u8 retry = nv_ro08(bios, init->offset + 2);
+	u8  wait = min((u16)retry * 50, 100);
+
+	trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
+	init->offset += 3;
+
+	if (!init_exec(init))
+		return;
+
+	while (wait--) {
+		if (init_condition_met(init, cond))
+			return;
+		mdelay(20);
+	}
+
+	init_exec_set(init, false);
+}
+
+/**
+ * INIT_LTIME - opcode 0x57
+ *
+ */
+static void
+init_ltime(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 msec = nv_ro16(bios, init->offset + 1);
+
+	trace("LTIME\t0x%04x\n", msec);
+	init->offset += 3;
+
+	if (init_exec(init))
+		mdelay(msec);
+}
+
+/**
+ * INIT_ZM_REG_SEQUENCE - opcode 0x58
+ *
+ */
+static void
+init_zm_reg_sequence(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 base = nv_ro32(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 5);
+
+	trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
+	init->offset += 6;
+
+	while (count--) {
+		u32 data = nv_ro32(bios, init->offset);
+
+		trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
+		init->offset += 4;
+
+		init_wr32(init, base, data);
+		base += 4;
+	}
+}
+
+/**
+ * INIT_SUB_DIRECT - opcode 0x5b
+ *
+ */
+static void
+init_sub_direct(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 addr = nv_ro16(bios, init->offset + 1);
+	u16 save;
+
+	trace("SUB_DIRECT\t0x%04x\n", addr);
+
+	if (init_exec(init)) {
+		save = init->offset;
+		init->offset = addr;
+		if (nvbios_exec(init)) {
+			error("error parsing sub-table\n");
+			return;
+		}
+		init->offset = save;
+	}
+
+	init->offset += 3;
+}
+
+/**
+ * INIT_JUMP - opcode 0x5c
+ *
+ */
+static void
+init_jump(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 offset = nv_ro16(bios, init->offset + 1);
+
+	trace("JUMP\t0x%04x\n", offset);
+	init->offset = offset;
+}
+
+/**
+ * INIT_I2C_IF - opcode 0x5e
+ *
+ */
+static void
+init_i2c_if(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2);
+	u8   reg = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8  data = nv_ro08(bios, init->offset + 5);
+	u8 value;
+
+	trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
+	      index, addr, reg, mask, data);
+	init->offset += 6;
+	init_exec_force(init, true);
+
+	value = init_rdi2cr(init, index, addr, reg);
+	if ((value & mask) != data)
+		init_exec_set(init, false);
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_COPY_NV_REG - opcode 0x5f
+ *
+ */
+static void
+init_copy_nv_reg(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  sreg = nv_ro32(bios, init->offset + 1);
+	u8  shift = nv_ro08(bios, init->offset + 5);
+	u32 smask = nv_ro32(bios, init->offset + 6);
+	u32  sxor = nv_ro32(bios, init->offset + 10);
+	u32  dreg = nv_ro32(bios, init->offset + 14);
+	u32 dmask = nv_ro32(bios, init->offset + 18);
+	u32 data;
+
+	trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
+	      "((R[0x%06x] %s 0x%02x) & 0x%08x ^ 0x%08x)\n",
+	      dreg, dmask, sreg, (shift & 0x80) ? "<<" : ">>",
+	      (shift & 0x80) ? (0x100 - shift) : shift, smask, sxor);
+	init->offset += 22;
+
+	data = init_shift(init_rd32(init, sreg), shift);
+	init_mask(init, dreg, ~dmask, (data & smask) ^ sxor);
+}
+
+/**
+ * INIT_ZM_INDEX_IO - opcode 0x62
+ *
+ */
+static void
+init_zm_index_io(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  data = nv_ro08(bios, init->offset + 4);
+
+	trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
+	init->offset += 5;
+
+	init_wrvgai(init, port, index, data);
+}
+
+/**
+ * INIT_COMPUTE_MEM - opcode 0x63
+ *
+ */
+static void
+init_compute_mem(struct nvbios_init *init)
+{
+	struct nouveau_devinit *devinit = nouveau_devinit(init->bios);
+
+	trace("COMPUTE_MEM\n");
+	init->offset += 1;
+
+	init_exec_force(init, true);
+	if (init_exec(init) && devinit->meminit)
+		devinit->meminit(devinit);
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_RESET - opcode 0x65
+ *
+ */
+static void
+init_reset(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32   reg = nv_ro32(bios, init->offset + 1);
+	u32 data1 = nv_ro32(bios, init->offset + 5);
+	u32 data2 = nv_ro32(bios, init->offset + 9);
+	u32 savepci19;
+
+	trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
+	init->offset += 13;
+	init_exec_force(init, true);
+
+	savepci19 = init_mask(init, 0x00184c, 0x00000f00, 0x00000000);
+	init_wr32(init, reg, data1);
+	udelay(10);
+	init_wr32(init, reg, data2);
+	init_wr32(init, 0x00184c, savepci19);
+	init_mask(init, 0x001850, 0x00000001, 0x00000000);
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_CONFIGURE_MEM - opcode 0x66
+ *
+ */
+static u16
+init_configure_mem_clk(struct nvbios_init *init)
+{
+	u16 mdata = bmp_mem_init_table(init->bios);
+	if (mdata)
+		mdata += (init_rdvgai(init, 0x03d4, 0x3c) >> 4) * 66;
+	return mdata;
+}
+
+static void
+init_configure_mem(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 mdata, sdata;
+	u32 addr, data;
+
+	trace("CONFIGURE_MEM\n");
+	init->offset += 1;
+
+	if (bios->version.major > 2) {
+		init_done(init);
+		return;
+	}
+	init_exec_force(init, true);
+
+	mdata = init_configure_mem_clk(init);
+	sdata = bmp_sdr_seq_table(bios);
+	if (nv_ro08(bios, mdata) & 0x01)
+		sdata = bmp_ddr_seq_table(bios);
+	mdata += 6; /* skip to data */
+
+	data = init_rdvgai(init, 0x03c4, 0x01);
+	init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
+
+	while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) {
+		switch (addr) {
+		case 0x10021c: /* CKE_NORMAL */
+		case 0x1002d0: /* CMD_REFRESH */
+		case 0x1002d4: /* CMD_PRECHARGE */
+			data = 0x00000001;
+			break;
+		default:
+			data = nv_ro32(bios, mdata);
+			mdata += 4;
+			if (data == 0xffffffff)
+				continue;
+			break;
+		}
+
+		init_wr32(init, addr, data);
+	}
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_CONFIGURE_CLK - opcode 0x67
+ *
+ */
+static void
+init_configure_clk(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 mdata, clock;
+
+	trace("CONFIGURE_CLK\n");
+	init->offset += 1;
+
+	if (bios->version.major > 2) {
+		init_done(init);
+		return;
+	}
+	init_exec_force(init, true);
+
+	mdata = init_configure_mem_clk(init);
+
+	/* NVPLL */
+	clock = nv_ro16(bios, mdata + 4) * 10;
+	init_prog_pll(init, 0x680500, clock);
+
+	/* MPLL */
+	clock = nv_ro16(bios, mdata + 2) * 10;
+	if (nv_ro08(bios, mdata) & 0x01)
+		clock *= 2;
+	init_prog_pll(init, 0x680504, clock);
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_CONFIGURE_PREINIT - opcode 0x68
+ *
+ */
+static void
+init_configure_preinit(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 strap;
+
+	trace("CONFIGURE_PREINIT\n");
+	init->offset += 1;
+
+	if (bios->version.major > 2) {
+		init_done(init);
+		return;
+	}
+	init_exec_force(init, true);
+
+	strap = init_rd32(init, 0x101000);
+	strap = ((strap << 2) & 0xf0) | ((strap & 0x40) >> 6);
+	init_wrvgai(init, 0x03d4, 0x3c, strap);
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_IO - opcode 0x69
+ *
+ */
+static void
+init_io(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8  mask = nv_ro08(bios, init->offset + 3);
+	u8  data = nv_ro08(bios, init->offset + 4);
+	u8 value;
+
+	trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
+	init->offset += 5;
+
+	/* the purpose of this sequence isn't understood, and it's quite
+	 * possibly wrong, but it's required to make things work on the
+	 * affected chipsets, so keep it until someone figures out what it does
+	 */
+	if (nv_device(init->bios)->card_type >= NV_50 &&
+	    port == 0x03c3 && data == 0x01) {
+		init_mask(init, 0x614100, 0xf0800000, 0x00800000);
+		init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
+		init_mask(init, 0x614900, 0xf0800000, 0x00800000);
+		init_mask(init, 0x000200, 0x40000000, 0x00000000);
+		mdelay(10);
+		init_mask(init, 0x00e18c, 0x00020000, 0x00000000);
+		init_mask(init, 0x000200, 0x40000000, 0x40000000);
+		init_wr32(init, 0x614100, 0x00800018);
+		init_wr32(init, 0x614900, 0x00800018);
+		mdelay(10);
+		init_wr32(init, 0x614100, 0x10000018);
+		init_wr32(init, 0x614900, 0x10000018);
+		return;
+	}
+
+	value = init_rdport(init, port) & mask;
+	init_wrport(init, port, data | value);
+}
+
+/**
+ * INIT_SUB - opcode 0x6b
+ *
+ */
+static void
+init_sub(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u16 addr, save;
+
+	trace("SUB\t0x%02x\n", index);
+
+	addr = init_script(bios, index);
+	if (addr && init_exec(init)) {
+		save = init->offset;
+		init->offset = addr;
+		if (nvbios_exec(init)) {
+			error("error parsing sub-table\n");
+			return;
+		}
+		init->offset = save;
+	}
+
+	init->offset += 2;
+}
+
+/**
+ * INIT_RAM_CONDITION - opcode 0x6d
+ *
+ */
+static void
+init_ram_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  mask = nv_ro08(bios, init->offset + 1);
+	u8 value = nv_ro08(bios, init->offset + 2);
+
+	trace("RAM_CONDITION\t"
+	      "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
+	init->offset += 3;
+
+	if ((init_rd32(init, 0x100000) & mask) != value)
+		init_exec_set(init, false);
+}
+
+/**
+ * INIT_NV_REG - opcode 0x6e
+ *
+ */
+static void
+init_nv_reg(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32 data = nv_ro32(bios, init->offset + 9);
+
+	trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
+	init->offset += 13;
+
+	init_mask(init, reg, ~mask, data);
+}
+
+/**
+ * INIT_MACRO - opcode 0x6f
+ *
+ */
+static void
+init_macro(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  macro = nv_ro08(bios, init->offset + 1);
+	u16 table;
+
+	trace("MACRO\t0x%02x\n", macro);
+
+	table = init_macro_table(init);
+	if (table) {
+		u32 addr = nv_ro32(bios, table + (macro * 8) + 0);
+		u32 data = nv_ro32(bios, table + (macro * 8) + 4);
+		trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
+		init_wr32(init, addr, data);
+	}
+
+	init->offset += 2;
+}
+
+/**
+ * INIT_RESUME - opcode 0x72
+ *
+ */
+static void
+init_resume(struct nvbios_init *init)
+{
+	trace("RESUME\n");
+	init->offset += 1;
+	init_exec_set(init, true);
+}
+
+/**
+ * INIT_TIME - opcode 0x74
+ *
+ */
+static void
+init_time(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 usec = nv_ro16(bios, init->offset + 1);
+
+	trace("TIME\t0x%04x\n", usec);
+	init->offset += 3;
+
+	if (init_exec(init)) {
+		if (usec < 1000)
+			udelay(usec);
+		else
+			mdelay((usec + 900) / 1000);
+	}
+}
+
+/**
+ * INIT_CONDITION - opcode 0x75
+ *
+ */
+static void
+init_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 cond = nv_ro08(bios, init->offset + 1);
+
+	trace("CONDITION\t0x%02x\n", cond);
+	init->offset += 2;
+
+	if (!init_condition_met(init, cond))
+		init_exec_set(init, false);
+}
+
+/**
+ * INIT_IO_CONDITION - opcode 0x76
+ *
+ */
+static void
+init_io_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 cond = nv_ro08(bios, init->offset + 1);
+
+	trace("IO_CONDITION\t0x%02x\n", cond);
+	init->offset += 2;
+
+	if (!init_io_condition_met(init, cond))
+		init_exec_set(init, false);
+}
+
+/**
+ * INIT_INDEX_IO - opcode 0x78
+ *
+ */
+static void
+init_index_io(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8  data = nv_ro08(bios, init->offset + 5);
+	u8 value;
+
+	trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
+	      port, index, mask, data);
+	init->offset += 6;
+
+	value = init_rdvgai(init, port, index) & mask;
+	init_wrvgai(init, port, index, data | value);
+}
+
+/**
+ * INIT_PLL - opcode 0x79
+ *
+ */
+static void
+init_pll(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u32 freq = nv_ro16(bios, init->offset + 5) * 10;
+
+	trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
+	init->offset += 7;
+
+	init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG - opcode 0x7a
+ *
+ */
+static void
+init_zm_reg(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u32 data = nv_ro32(bios, init->offset + 5);
+
+	trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
+	init->offset += 9;
+
+	if (addr == 0x000200)
+		data |= 0x00000001;
+
+	init_wr32(init, addr, data);
+}
+
+/**
+ * INIT_RAM_RESTRICT_PLL - opcode 0x87
+ *
+ */
+static void
+init_ram_restrict_pll(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  type = nv_ro08(bios, init->offset + 1);
+	u8 count = init_ram_restrict_group_count(init);
+	u8 strap = init_ram_restrict(init);
+	u8 cconf;
+
+	trace("RAM_RESTRICT_PLL\t0x%02x\n", type);
+	init->offset += 2;
+
+	for (cconf = 0; cconf < count; cconf++) {
+		u32 freq = nv_ro32(bios, init->offset);
+
+		if (cconf == strap) {
+			trace("%dkHz *\n", freq);
+			init_prog_pll(init, type, freq);
+		} else {
+			trace("%dkHz\n", freq);
+		}
+
+		init->offset += 4;
+	}
+}
+
+/**
+ * INIT_GPIO - opcode 0x8e
+ *
+ */
+static void
+init_gpio(struct nvbios_init *init)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(init->bios);
+
+	trace("GPIO\n");
+	init->offset += 1;
+
+	if (init_exec(init) && gpio && gpio->reset)
+		gpio->reset(gpio);
+}
+
+/**
+ * INIT_RAM_RESTRICT_ZM_REG_GROUP - opcode 0x8f
+ *
+ */
+static void
+init_ram_restrict_zm_reg_group(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u8  incr = nv_ro08(bios, init->offset + 5);
+	u8   num = nv_ro08(bios, init->offset + 6);
+	u8 count = init_ram_restrict_group_count(init);
+	u8 index = init_ram_restrict(init);
+	u8 i, j;
+
+	trace("RAM_RESTRICT_ZM_REG_GROUP\t"
+	      "R[%08x] 0x%02x 0x%02x\n", addr, incr, num);
+	init->offset += 7;
+
+	for (i = 0; i < num; i++) {
+		trace("\tR[0x%06x] = {\n", addr);
+		for (j = 0; j < count; j++) {
+			u32 data = nv_ro32(bios, init->offset);
+
+			if (j == index) {
+				trace("\t\t0x%08x *\n", data);
+				init_wr32(init, addr, data);
+			} else {
+				trace("\t\t0x%08x\n", data);
+			}
+
+			init->offset += 4;
+		}
+		trace("\t}\n");
+		addr += incr;
+	}
+}
+
+/**
+ * INIT_COPY_ZM_REG - opcode 0x90
+ *
+ */
+static void
+init_copy_zm_reg(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 sreg = nv_ro32(bios, init->offset + 1);
+	u32 dreg = nv_ro32(bios, init->offset + 5);
+
+	trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", sreg, dreg);
+	init->offset += 9;
+
+	init_wr32(init, dreg, init_rd32(init, sreg));
+}
+
+/**
+ * INIT_ZM_REG_GROUP - opcode 0x91
+ *
+ */
+static void
+init_zm_reg_group(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 5);
+
+	trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
+	init->offset += 6;
+
+	while (count--) {
+		u32 data = nv_ro32(bios, init->offset);
+		trace("\t0x%08x\n", data);
+		init_wr32(init, addr, data);
+		init->offset += 4;
+	}
+}
+
+/**
+ * INIT_XLAT - opcode 0x96
+ *
+ */
+static void
+init_xlat(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 saddr = nv_ro32(bios, init->offset + 1);
+	u8 sshift = nv_ro08(bios, init->offset + 5);
+	u8  smask = nv_ro08(bios, init->offset + 6);
+	u8  index = nv_ro08(bios, init->offset + 7);
+	u32 daddr = nv_ro32(bios, init->offset + 8);
+	u32 dmask = nv_ro32(bios, init->offset + 12);
+	u8  shift = nv_ro08(bios, init->offset + 16);
+	u32 data;
+
+	trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
+	      "(X%02x((R[0x%06x] %s 0x%02x) & 0x%02x) << 0x%02x)\n",
+	      daddr, dmask, index, saddr, (sshift & 0x80) ? "<<" : ">>",
+	      (sshift & 0x80) ? (0x100 - sshift) : sshift, smask, shift);
+	init->offset += 17;
+
+	data = init_shift(init_rd32(init, saddr), sshift) & smask;
+	data = init_xlat_(init, index, data) << shift;
+	init_mask(init, daddr, ~dmask, data);
+}
+
+/**
+ * INIT_ZM_MASK_ADD - opcode 0x97
+ *
+ */
+static void
+init_zm_mask_add(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32  add = nv_ro32(bios, init->offset + 9);
+	u32 data;
+
+	trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
+	init->offset += 13;
+
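+	/* bits covered by mask keep their current value, the remaining bits
+	 * are taken from ((value & mask) + add)
+	 */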
+	data  =  init_rd32(init, addr) & mask;
+	data |= ((data + add) & ~mask);
+	init_wr32(init, addr, data);
+}
+
+/**
+ * INIT_AUXCH - opcode 0x98
+ *
+ */
+static void
+init_auxch(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 5);
+
+	trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
+	init->offset += 6;
+
+	while (count--) {
+		u8 mask = nv_ro08(bios, init->offset + 0);
+		u8 data = nv_ro08(bios, init->offset + 1);
+		trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
+		mask = init_rdauxr(init, addr) & mask;
+		init_wrauxr(init, addr, mask | data);
+		init->offset += 2;
+	}
+}
+
+/**
+ * INIT_ZM_AUXCH - opcode 0x99
+ *
+ */
+static void
+init_zm_auxch(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 5);
+
+	trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
+	init->offset += 6;
+
+	while (count--) {
+		u8 data = nv_ro08(bios, init->offset + 0);
+		trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
+		init_wrauxr(init, addr, data);
+		init->offset += 1;
+	}
+}
+
+/**
+ * INIT_I2C_LONG_IF - opcode 0x9a
+ *
+ */
+static void
+init_i2c_long_if(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
+	u8 reglo = nv_ro08(bios, init->offset + 3);
+	u8 reghi = nv_ro08(bios, init->offset + 4);
+	u8  mask = nv_ro08(bios, init->offset + 5);
+	u8  data = nv_ro08(bios, init->offset + 6);
+	struct nouveau_i2c_port *port;
+
+	trace("I2C_LONG_IF\t"
+	      "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
+	      index, addr, reglo, reghi, mask, data);
+	init->offset += 7;
+
+	port = init_i2c(init, index);
+	if (port) {
+		u8 i[2] = { reghi, reglo };
+		u8 o[1] = {};
+		struct i2c_msg msg[] = {
+			{ .addr = addr, .flags = 0, .len = 2, .buf = i },
+			{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = o }
+		};
+		int ret;
+
+		ret = i2c_transfer(&port->adapter, msg, 2);
+		if (ret == 2 && ((o[0] & mask) == data))
+			return;
+	}
+
+	init_exec_set(init, false);
+}
+
+static struct nvbios_init_opcode {
+	void (*exec)(struct nvbios_init *);
+} init_opcode[] = {
+	[0x32] = { init_io_restrict_prog },
+	[0x33] = { init_repeat },
+	[0x34] = { init_io_restrict_pll },
+	[0x36] = { init_end_repeat },
+	[0x37] = { init_copy },
+	[0x38] = { init_not },
+	[0x39] = { init_io_flag_condition },
+	[0x3a] = { init_dp_condition },
+	[0x3b] = { init_io_mask_or },
+	[0x3c] = { init_io_or },
+	[0x49] = { init_idx_addr_latched },
+	[0x4a] = { init_io_restrict_pll2 },
+	[0x4b] = { init_pll2 },
+	[0x4c] = { init_i2c_byte },
+	[0x4d] = { init_zm_i2c_byte },
+	[0x4e] = { init_zm_i2c },
+	[0x4f] = { init_tmds },
+	[0x50] = { init_zm_tmds_group },
+	[0x51] = { init_cr_idx_adr_latch },
+	[0x52] = { init_cr },
+	[0x53] = { init_zm_cr },
+	[0x54] = { init_zm_cr_group },
+	[0x56] = { init_condition_time },
+	[0x57] = { init_ltime },
+	[0x58] = { init_zm_reg_sequence },
+	[0x5b] = { init_sub_direct },
+	[0x5c] = { init_jump },
+	[0x5e] = { init_i2c_if },
+	[0x5f] = { init_copy_nv_reg },
+	[0x62] = { init_zm_index_io },
+	[0x63] = { init_compute_mem },
+	[0x65] = { init_reset },
+	[0x66] = { init_configure_mem },
+	[0x67] = { init_configure_clk },
+	[0x68] = { init_configure_preinit },
+	[0x69] = { init_io },
+	[0x6b] = { init_sub },
+	[0x6d] = { init_ram_condition },
+	[0x6e] = { init_nv_reg },
+	[0x6f] = { init_macro },
+	[0x71] = { init_done },
+	[0x72] = { init_resume },
+	[0x74] = { init_time },
+	[0x75] = { init_condition },
+	[0x76] = { init_io_condition },
+	[0x78] = { init_index_io },
+	[0x79] = { init_pll },
+	[0x7a] = { init_zm_reg },
+	[0x87] = { init_ram_restrict_pll },
+	[0x8c] = { init_reserved },
+	[0x8d] = { init_reserved },
+	[0x8e] = { init_gpio },
+	[0x8f] = { init_ram_restrict_zm_reg_group },
+	[0x90] = { init_copy_zm_reg },
+	[0x91] = { init_zm_reg_group },
+	[0x92] = { init_reserved },
+	[0x96] = { init_xlat },
+	[0x97] = { init_zm_mask_add },
+	[0x98] = { init_auxch },
+	[0x99] = { init_zm_auxch },
+	[0x9a] = { init_i2c_long_if },
+};
+
+#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
+
+int
+nvbios_exec(struct nvbios_init *init)
+{
+	init->nested++;
+	while (init->offset) {
+		u8 opcode = nv_ro08(init->bios, init->offset);
+		if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
+			error("unknown opcode 0x%02x\n", opcode);
+			return -EINVAL;
+		}
+
+		init_opcode[opcode].exec(init);
+	}
+	init->nested--;
+	return 0;
+}
+
+int
+nvbios_init(struct nouveau_subdev *subdev, bool execute)
+{
+	struct nouveau_bios *bios = nouveau_bios(subdev);
+	int ret = 0;
+	int i = -1;
+	u16 data;
+
+	if (execute)
+		nv_info(bios, "running init tables\n");
+	while (!ret && (data = (init_script(bios, ++i)))) {
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = bios,
+			.offset = data,
+			.outp = NULL,
+			.crtc = -1,
+			.execute = execute ? 1 : 0,
+		};
+
+		ret = nvbios_exec(&init);
+	}
+
+	/* the vbios parser will run this right after the normal init
+	 * tables, whereas the binary driver appears to run it later.
+	 */
+	if (!ret && (data = init_unknown_script(bios))) {
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = bios,
+			.offset = data,
+			.outp = NULL,
+			.crtc = -1,
+			.execute = execute ? 1 : 0,
+		};
+
+		ret = nvbios_exec(&init);
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
new file mode 100644
index 000000000000..2610b11a99b3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/mxm.h>
+
+u16
+mxm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr)
+{
+	struct bit_entry x;
+
+	if (bit_entry(bios, 'x', &x)) {
+		nv_debug(bios, "BIT 'x' table not present\n");
+		return 0x0000;
+	}
+
+	*ver = x.version;
+	*hdr = x.length;
+	if (*ver != 1 || *hdr < 3) {
+		nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
+		return 0x0000;
+	}
+
+	return x.offset;
+}
+
+/* These map MXM v2.x digital connection values to the appropriate SOR/link,
+ * hopefully they're correct for all boards within the same chipset...
+ *
+ * MXM v3.x VBIOS are nicer and provide pointers to these tables.
+ */
+static u8 nv84_sor_map[16] = {
+	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv92_sor_map[16] = {
+	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv94_sor_map[16] = {
+	0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
+	0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv98_sor_map[16] = {
+	0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
+	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+u8
+mxm_sor_map(struct nouveau_bios *bios, u8 conn)
+{
+	u8  ver, hdr;
+	u16 mxm = mxm_table(bios, &ver, &hdr);
+	if (mxm && hdr >= 6) {
+		u16 map = nv_ro16(bios, mxm + 4);
+		if (map) {
+			ver = nv_ro08(bios, map);
+			if (ver == 0x10) {
+				if (conn < nv_ro08(bios, map + 3)) {
+					map += nv_ro08(bios, map + 1);
+					map += conn;
+					return nv_ro08(bios, map);
+				}
+
+				return 0x00;
+			}
+
+			nv_warn(bios, "unknown sor map v%02x\n", ver);
+		}
+	}
+
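+	/* no usable map in the vbios, fall back to the hardcoded v2.x tables */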
+	if (bios->version.chip == 0x84 || bios->version.chip == 0x86)
+		return nv84_sor_map[conn];
+	if (bios->version.chip == 0x92)
+		return nv92_sor_map[conn];
+	if (bios->version.chip == 0x94 || bios->version.chip == 0x96)
+		return nv94_sor_map[conn];
+	if (bios->version.chip == 0x98)
+		return nv98_sor_map[conn];
+
+	nv_warn(bios, "missing sor map\n");
+	return 0x00;
+}
+
+u8
+mxm_ddc_map(struct nouveau_bios *bios, u8 port)
+{
+	u8  ver, hdr;
+	u16 mxm = mxm_table(bios, &ver, &hdr);
+	if (mxm && hdr >= 8) {
+		u16 map = nv_ro16(bios, mxm + 6);
+		if (map) {
+			ver = nv_ro08(bios, map);
+			if (ver == 0x10) {
+				if (port < nv_ro08(bios, map + 3)) {
+					map += nv_ro08(bios, map + 1);
+					map += port;
+					return nv_ro08(bios, map);
+				}
+
+				return 0x00;
+			}
+
+			nv_warn(bios, "unknown ddc map v%02x\n", ver);
+		}
+	}
+
+	/* v2.x: directly write port as dcb i2cidx */
+	return (port << 4) | port;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
new file mode 100644
index 000000000000..bcbb056c2887
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/perf.h>
+
+static u16
+perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 perf = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version <= 2) {
+			perf = nv_ro16(bios, bit_P.offset + 0);
+			if (perf) {
+				*ver = nv_ro08(bios, perf + 0);
+				*hdr = nv_ro08(bios, perf + 1);
+			}
+		} else
+			nv_error(bios, "unknown offset for perf in BIT P %d\n",
+				bit_P.version);
+	}
+
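+	/* on BMP-based vbios, the perf table pointer lives in the BMP header */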
+	if (bios->bmp_offset) {
+		if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) {
+			perf = nv_ro16(bios, bios->bmp_offset + 0x94);
+			if (perf) {
+				*hdr = nv_ro08(bios, perf + 0);
+				*ver = nv_ro08(bios, perf + 1);
+			}
+		}
+	}
+
+	return perf;
+}
+
+int
+nvbios_perf_fan_parse(struct nouveau_bios *bios,
+		      struct nvbios_perf_fan *fan)
+{
+	u8 ver = 0, hdr = 0, cnt = 0, len = 0;
+	u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len);
+	if (!perf)
+		return -ENODEV;
+
+	if (ver >= 0x20 && ver < 0x40 && hdr > 6)
+		fan->pwm_divisor = nv_ro16(bios, perf + 6);
+	else
+		fan->pwm_divisor = 0;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
new file mode 100644
index 000000000000..5e5f4cddae3c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2005-2006 Erik Waling
+ * Copyright 2006 Stephane Marchesin
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <subdev/vga.h>
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/bmp.h>
+#include <subdev/bios/pll.h>
+
+struct pll_mapping {
+	u8  type;
+	u32 reg;
+};
+
+static struct pll_mapping
+nv04_pll_mapping[] = {
+	{ PLL_CORE  , 0x680500 },
+	{ PLL_MEMORY, 0x680504 },
+	{ PLL_VPLL0 , 0x680508 },
+	{ PLL_VPLL1 , 0x680520 },
+	{}
+};
+
+static struct pll_mapping
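+	/* bit 0 of the mem init entry selects the DDR sequence table */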
+nv40_pll_mapping[] = {
+	{ PLL_CORE  , 0x004000 },
+	{ PLL_MEMORY, 0x004020 },
+	{ PLL_VPLL0 , 0x680508 },
+	{ PLL_VPLL1 , 0x680520 },
+	{}
+};
+
+static struct pll_mapping
+nv50_pll_mapping[] = {
+	{ PLL_CORE  , 0x004028 },
+	{ PLL_SHADER, 0x004020 },
+	{ PLL_UNK03 , 0x004000 },
+	{ PLL_MEMORY, 0x004008 },
+	{ PLL_UNK40 , 0x00e810 },
+	{ PLL_UNK41 , 0x00e818 },
+	{ PLL_UNK42 , 0x00e824 },
+	{ PLL_VPLL0 , 0x614100 },
+	{ PLL_VPLL1 , 0x614900 },
+	{}
+};
+
+static struct pll_mapping
+nv84_pll_mapping[] = {
+	{ PLL_CORE  , 0x004028 },
+	{ PLL_SHADER, 0x004020 },
+	{ PLL_MEMORY, 0x004008 },
+	{ PLL_VDEC  , 0x004030 },
+	{ PLL_UNK41 , 0x00e818 },
+	{ PLL_VPLL0 , 0x614100 },
+	{ PLL_VPLL1 , 0x614900 },
+	{}
+};
+
+static u16
+pll_limits_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_C;
+
+	if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
+		u16 data = nv_ro16(bios, bit_C.offset + 8);
+		if (data) {
+			*ver = nv_ro08(bios, data + 0);
+			*hdr = nv_ro08(bios, data + 1);
+			*len = nv_ro08(bios, data + 2);
+			*cnt = nv_ro08(bios, data + 3);
+			return data;
+		}
+	}
+
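+	/* BMP 5.24 and newer point at a single pll limits entry instead */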
+	if (bmp_version(bios) >= 0x0524) {
+		u16 data = nv_ro16(bios, bios->bmp_offset + 142);
+		if (data) {
+			*ver = nv_ro08(bios, data + 0);
+			*hdr = 1;
+			*cnt = 1;
+			*len = 0x18;
+			return data;
+		}
+	}
+
+	*ver = 0x00;
+	return 0x0000;
+}
+
+static struct pll_mapping *
+pll_map(struct nouveau_bios *bios)
+{
+	switch (nv_device(bios)->card_type) {
+	case NV_04:
+	case NV_10:
+	case NV_20:
+	case NV_30:
+		return nv04_pll_mapping;
+	case NV_40:
+		return nv40_pll_mapping;
+	case NV_50:
+		if (nv_device(bios)->chipset == 0x50)
+			return nv50_pll_mapping;
+		else
+		if (nv_device(bios)->chipset <  0xa3 ||
+		    nv_device(bios)->chipset == 0xaa ||
+		    nv_device(bios)->chipset == 0xac)
+			return nv84_pll_mapping;
+	default:
+		return NULL;
+	}
+}
+
+static u16
+pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
+{
+	struct pll_mapping *map;
+	u8  hdr, cnt;
+	u16 data;
+
+	data = pll_limits_table(bios, ver, &hdr, &cnt, len);
+	if (data && *ver >= 0x30) {
+		data += hdr;
+		while (cnt--) {
+			if (nv_ro32(bios, data + 3) == reg) {
+				*type = nv_ro08(bios, data + 0);
+				return data;
+			}
+			data += *len;
+		}
+		return 0x0000;
+	}
+
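+	/* older limits tables don't map registers to pll types themselves,
+	 * use the per-chipset mapping above to do it
+	 */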
+	map = pll_map(bios);
+	while (map->reg) {
+		if (map->reg == reg && *ver >= 0x20) {
+			u16 addr = (data += hdr);
+			while (cnt--) {
+				if (nv_ro32(bios, data) == map->reg) {
+					*type = map->type;
+					return data;
+				}
+				data += *len;
+			}
+			return addr;
+		} else
+		if (map->reg == reg) {
+			*type = map->type;
+			return data + 1;
+		}
+		map++;
+	}
+
+	return 0x0000;
+}
+
+static u16
+pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
+{
+	struct pll_mapping *map;
+	u8  hdr, cnt;
+	u16 data;
+
+	data = pll_limits_table(bios, ver, &hdr, &cnt, len);
+	if (data && *ver >= 0x30) {
+		data += hdr;
+		while (cnt--) {
+			if (nv_ro08(bios, data + 0) == type) {
+				*reg = nv_ro32(bios, data + 3);
+				return data;
+			}
+			data += *len;
+		}
+		return 0x0000;
+	}
+
+	map = pll_map(bios);
+	while (map->reg) {
+		if (map->type == type && *ver >= 0x20) {
+			u16 addr = (data += hdr);
+			while (cnt--) {
+				if (nv_ro32(bios, data) == map->reg) {
+					*reg = map->reg;
+					return data;
+				}
+				data += *len;
+			}
+			return addr;
+		} else
+		if (map->type == type) {
+			*reg = map->reg;
+			return data + 1;
+		}
+		map++;
+	}
+
+	return 0x0000;
+}
+
+int
+nvbios_pll_parse(struct nouveau_bios *bios, u32 type, struct nvbios_pll *info)
+{
+	u8  ver, len;
+	u32 reg = type;
+	u16 data;
+
+	if (type > PLL_MAX) {
+		reg  = type;
+		data = pll_map_reg(bios, reg, &type, &ver, &len);
+	} else {
+		data = pll_map_type(bios, type, &reg, &ver, &len);
+	}
+
+	if (ver && !data)
+		return -ENOENT;
+
+	memset(info, 0, sizeof(*info));
+	info->type = type;
+	info->reg = reg;
+
+	switch (ver) {
+	case 0x00:
+		break;
+	case 0x10:
+	case 0x11:
+		info->vco1.min_freq = nv_ro32(bios, data + 0);
+		info->vco1.max_freq = nv_ro32(bios, data + 4);
+		info->vco2.min_freq = nv_ro32(bios, data + 8);
+		info->vco2.max_freq = nv_ro32(bios, data + 12);
+		info->vco1.min_inputfreq = nv_ro32(bios, data + 16);
+		info->vco2.min_inputfreq = nv_ro32(bios, data + 20);
+		info->vco1.max_inputfreq = INT_MAX;
+		info->vco2.max_inputfreq = INT_MAX;
+
+		info->max_p = 0x7;
+		info->max_p_usable = 0x6;
+
+		/* these values taken from nv30/31/36 */
+		switch (bios->version.chip) {
+		case 0x36:
+			info->vco1.min_n = 0x5;
+			break;
+		default:
+			info->vco1.min_n = 0x1;
+			break;
+		}
+		info->vco1.max_n = 0xff;
+		info->vco1.min_m = 0x1;
+		info->vco1.max_m = 0xd;
+
+		/*
+		 * On nv30, 31 and 36 (i.e. all cards with two-stage PLLs using
+		 * this table version, apart from nv35), N2 is compared against
+		 * both maxN2 (0x46) and 10 * maxM2 (10 * 0x4 = 0x28).  0x28 is
+		 * the tighter limit, so use it as maxN2 and save a comparison
+		 */
+		info->vco2.min_n = 0x4;
+		switch (bios->version.chip) {
+		case 0x30:
+		case 0x35:
+			info->vco2.max_n = 0x1f;
+			break;
+		default:
+			info->vco2.max_n = 0x28;
+			break;
+		}
+		info->vco2.min_m = 0x1;
+		info->vco2.max_m = 0x4;
+		break;
+	case 0x20:
+	case 0x21:
+		info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000;
+		info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000;
+		info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000;
+		info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000;
+		info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000;
+		info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000;
+		info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000;
+		info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000;
+		info->vco1.min_n = nv_ro08(bios, data + 20);
+		info->vco1.max_n = nv_ro08(bios, data + 21);
+		info->vco1.min_m = nv_ro08(bios, data + 22);
+		info->vco1.max_m = nv_ro08(bios, data + 23);
+		info->vco2.min_n = nv_ro08(bios, data + 24);
+		info->vco2.max_n = nv_ro08(bios, data + 25);
+		info->vco2.min_m = nv_ro08(bios, data + 26);
+		info->vco2.max_m = nv_ro08(bios, data + 27);
+
+		info->max_p = nv_ro08(bios, data + 29);
+		info->max_p_usable = info->max_p;
+		if (bios->version.chip < 0x60)
+			info->max_p_usable = 0x6;
+		info->bias_p = nv_ro08(bios, data + 30);
+
+		if (len > 0x22)
+			info->refclk = nv_ro32(bios, data + 31);
+		break;
+	case 0x30:
+		data = nv_ro16(bios, data + 1);
+
+		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
+		info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000;
+		info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000;
+		info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000;
+		info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000;
+		info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000;
+		info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000;
+		info->vco1.min_n = nv_ro08(bios, data + 16);
+		info->vco1.max_n = nv_ro08(bios, data + 17);
+		info->vco1.min_m = nv_ro08(bios, data + 18);
+		info->vco1.max_m = nv_ro08(bios, data + 19);
+		info->vco2.min_n = nv_ro08(bios, data + 20);
+		info->vco2.max_n = nv_ro08(bios, data + 21);
+		info->vco2.min_m = nv_ro08(bios, data + 22);
+		info->vco2.max_m = nv_ro08(bios, data + 23);
+		info->max_p_usable = info->max_p = nv_ro08(bios, data + 25);
+		info->bias_p = nv_ro08(bios, data + 27);
+		info->refclk = nv_ro32(bios, data + 28);
+		break;
+	case 0x40:
+		info->refclk = nv_ro16(bios, data + 9) * 1000;
+		data = nv_ro16(bios, data + 1);
+
+		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
+		info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000;
+		info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000;
+		info->vco1.min_m = nv_ro08(bios, data + 8);
+		info->vco1.max_m = nv_ro08(bios, data + 9);
+		info->vco1.min_n = nv_ro08(bios, data + 10);
+		info->vco1.max_n = nv_ro08(bios, data + 11);
+		info->min_p = nv_ro08(bios, data + 12);
+		info->max_p = nv_ro08(bios, data + 13);
+		break;
+	default:
+		nv_error(bios, "unknown pll limits version 0x%02x\n", ver);
+		return -EINVAL;
+	}
+
+	if (!info->refclk) {
+		info->refclk = nv_device(bios)->crystal;
+		if (bios->version.chip == 0x51) {
+			u32 sel_clk = nv_rd32(bios, 0x680524);
+			if ((info->reg == 0x680508 && sel_clk & 0x20) ||
+			    (info->reg == 0x680520 && sel_clk & 0x80)) {
+				if (nv_rdvgac(bios, 0, 0x27) < 0xa3)
+					info->refclk = 200000;
+				else
+					info->refclk = 25000;
+			}
+		}
+	}
+
+	/*
+	 * By now any valid limit table ought to have set a max frequency for
+	 * vco1, so if it's zero it's either a pre limit table bios, or one
+	 * with an empty limit table (seen on nv18)
+	 */
+	if (!info->vco1.max_freq) {
+		info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67);
+		info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71);
+		if (bmp_version(bios) < 0x0506) {
+			info->vco1.max_freq = 256000;
+			info->vco1.min_freq = 128000;
+		}
+
+		info->vco1.min_inputfreq = 0;
+		info->vco1.max_inputfreq = INT_MAX;
+		info->vco1.min_n = 0x1;
+		info->vco1.max_n = 0xff;
+		info->vco1.min_m = 0x1;
+
+		if (nv_device(bios)->crystal == 13500) {
+			/* nv05 does this, nv11 doesn't, nv10 unknown */
+			if (bios->version.chip < 0x11)
+				info->vco1.min_m = 0x7;
+			info->vco1.max_m = 0xd;
+		} else {
+			if (bios->version.chip < 0x11)
+				info->vco1.min_m = 0x8;
+			info->vco1.max_m = 0xe;
+		}
+
+		if (bios->version.chip <  0x17 ||
+		    bios->version.chip == 0x1a ||
+		    bios->version.chip == 0x20)
+			info->max_p = 4;
+		else
+			info->max_p = 5;
+		info->max_p_usable = info->max_p;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
new file mode 100644
index 000000000000..862a08a2ae27
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/therm.h>
+
+static u16
+therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
+{
+	struct bit_entry bit_P;
+	u16 therm = 0;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 1)
+			therm = nv_ro16(bios, bit_P.offset + 12);
+		else if (bit_P.version == 2)
+			therm = nv_ro16(bios, bit_P.offset + 16);
+		else
+			nv_error(bios,
+				"unknown offset for thermal in BIT P %d\n",
+				bit_P.version);
+	}
+
+	/* exit now if we haven't found the thermal table */
+	if (!therm)
+		return 0x0000;
+
+	*ver = nv_ro08(bios, therm + 0);
+	*hdr = nv_ro08(bios, therm + 1);
+	*len = nv_ro08(bios, therm + 2);
+	*cnt = nv_ro08(bios, therm + 3);
+
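+	/* skip the header, returning the offset of the first entry */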
+	return therm + nv_ro08(bios, therm + 1);
+}
+
+u16
+nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8 hdr, cnt;
+	u16 therm = therm_table(bios, ver, &hdr, len, &cnt);
+	if (therm && idx < cnt)
+		return therm + idx * *len;
+	return 0x0000;
+}
+
+int
+nvbios_therm_sensor_parse(struct nouveau_bios *bios,
+			  enum nvbios_therm_domain domain,
+			  struct nvbios_therm_sensor *sensor)
+{
+	s8 thrs_section, sensor_section, offset;
+	u8 ver, len, i;
+	u16 entry;
+
+	/* we only support the core domain for now */
+	if (domain != NVBIOS_THERM_DOMAIN_CORE)
+		return -EINVAL;
+
+	/* Read the entries from the table */
+	thrs_section = 0;
+	sensor_section = -1;
+	i = 0;
+	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
+		s16 value = nv_ro16(bios, entry + 1);
+
+		switch (nv_ro08(bios, entry + 0)) {
+		case 0x0:
+			thrs_section = value;
+			if (value > 0)
+				return 0; /* we do not try to support ambient */
+			break;
+		case 0x01:
+			sensor_section++;
+			if (sensor_section == 0) {
+				offset = ((s8) nv_ro08(bios, entry + 2)) / 2;
+				sensor->offset_constant = offset;
+			}
+			break;
+
+		case 0x04:
+			if (thrs_section == 0) {
+				sensor->thrs_critical.temp = (value & 0xff0) >> 4;
+				sensor->thrs_critical.hysteresis = value & 0xf;
+			}
+			break;
+
+		case 0x07:
+			if (thrs_section == 0) {
+				sensor->thrs_down_clock.temp = (value & 0xff0) >> 4;
+				sensor->thrs_down_clock.hysteresis = value & 0xf;
+			}
+			break;
+
+		case 0x08:
+			if (thrs_section == 0) {
+				sensor->thrs_fan_boost.temp = (value & 0xff0) >> 4;
+				sensor->thrs_fan_boost.hysteresis = value & 0xf;
+			}
+			break;
+
+		case 0x10:
+			if (sensor_section == 0)
+				sensor->offset_num = value;
+			break;
+
+		case 0x11:
+			if (sensor_section == 0)
+				sensor->offset_den = value;
+			break;
+
+		case 0x12:
+			if (sensor_section == 0)
+				sensor->slope_mult = value;
+			break;
+
+		case 0x13:
+			if (sensor_section == 0)
+				sensor->slope_div = value;
+			break;
+		case 0x32:
+			if (thrs_section == 0) {
+				sensor->thrs_shutdown.temp = (value & 0xff0) >> 4;
+				sensor->thrs_shutdown.hysteresis = value & 0xf;
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+int
+nvbios_therm_fan_parse(struct nouveau_bios *bios,
+			  struct nvbios_therm_fan *fan)
+{
+	u8 ver, len, i;
+	u16 entry;
+
+	i = 0;
+	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
+		s16 value = nv_ro16(bios, entry + 1);
+
+		switch (nv_ro08(bios, entry + 0)) {
+		case 0x22:
+			fan->min_duty = value & 0xff;
+			fan->max_duty = (value & 0xff00) >> 8;
+			break;
+		case 0x26:
+			fan->pwm_freq = value;
+			break;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
new file mode 100644
index 000000000000..b7fd1151166e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nv04_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+powerctrl_1_shift(int chip_version, int reg)
+{
+	int shift = -4;
+
+	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
+		return shift;
+
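+	/* deliberate fallthrough: the shift accumulates another 4 for each
+	 * case passed through
+	 */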
+	switch (reg) {
+	case 0x680520:
+		shift += 4;
+	case 0x680508:
+		shift += 4;
+	case 0x680504:
+		shift += 4;
+	case 0x680500:
+		shift += 4;
+	}
+
+	/*
+	 * the shift for vpll regs is only used for nv3x chips with a single
+	 * stage pll
+	 */
+	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
+			  chip_version == 0x36 || chip_version >= 0x40))
+		shift = -4;
+
+	return shift;
+}
+
+static void
+setPLL_single(struct nv04_clock_priv *priv, u32 reg,
+	      struct nouveau_pll_vals *pv)
+{
+	int chip_version = nouveau_bios(priv)->version.chip;
+	uint32_t oldpll = nv_rd32(priv, reg);
+	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
+	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+	uint32_t saved_powerctrl_1 = 0;
+	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
+
+	if (oldpll == pll)
+		return;	/* already set */
+
+	if (shift_powerctrl_1 >= 0) {
+		saved_powerctrl_1 = nv_rd32(priv, 0x001584);
+		nv_wr32(priv, 0x001584,
+			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+			1 << shift_powerctrl_1);
+	}
+
+	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
+		/* upclock -- write new post divider first */
+		nv_wr32(priv, reg, pv->log2P << 16 | (oldpll & 0xffff));
+	else
+		/* downclock -- write new NM first */
+		nv_wr32(priv, reg, (oldpll & 0xffff0000) | pv->NM1);
+
+	if (chip_version < 0x17 && chip_version != 0x11)
+		/* wait a bit on older chips */
+		msleep(64);
+	nv_rd32(priv, reg);
+
+	/* then write the other half as well */
+	nv_wr32(priv, reg, pll);
+
+	if (shift_powerctrl_1 >= 0)
+		nv_wr32(priv, 0x001584, saved_powerctrl_1);
+}
+
+static uint32_t
+new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
+{
+	bool head_a = (reg1 == 0x680508);
+
+	if (ss)	/* single stage pll mode */
+		ramdac580 |= head_a ? 0x00000100 : 0x10000000;
+	else
+		ramdac580 &= head_a ? 0xfffffeff : 0xefffffff;
+
+	return ramdac580;
+}
+
+static void
+setPLL_double_highregs(struct nv04_clock_priv *priv, u32 reg1,
+		       struct nouveau_pll_vals *pv)
+{
+	int chip_version = nouveau_bios(priv)->version.chip;
+	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
+	uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
+	uint32_t oldpll1 = nv_rd32(priv, reg1);
+	uint32_t oldpll2 = !nv3035 ? nv_rd32(priv, reg2) : 0;
+	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
+	uint32_t oldramdac580 = 0, ramdac580 = 0;
+	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
+	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
+	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
+
+	/* model specific additions to generic pll1 and pll2 set up above */
+	if (nv3035) {
+		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
+		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
+		pll2 = 0;
+	}
+	if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
+		oldramdac580 = nv_rd32(priv, 0x680580);
+		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
+		if (oldramdac580 != ramdac580)
+			oldpll1 = ~0;	/* force mismatch */
+		if (single_stage)
+			/* magic value used by nvidia in single stage mode */
+			pll2 |= 0x011f;
+	}
+	if (chip_version > 0x70)
+		/* magic bits set by the blob (but not the bios) on g71-73 */
+		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
+
+	if (oldpll1 == pll1 && oldpll2 == pll2)
+		return;	/* already set */
+
+	if (shift_powerctrl_1 >= 0) {
+		saved_powerctrl_1 = nv_rd32(priv, 0x001584);
+		nv_wr32(priv, 0x001584,
+			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+			1 << shift_powerctrl_1);
+	}
+
+	if (chip_version >= 0x40) {
+		int shift_c040 = 14;
+
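+		/* deliberate fallthrough: the shift accumulates 2 per case */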
+		switch (reg1) {
+		case 0x680504:
+			shift_c040 += 2;
+		case 0x680500:
+			shift_c040 += 2;
+		case 0x680520:
+			shift_c040 += 2;
+		case 0x680508:
+			shift_c040 += 2;
+		}
+
+		savedc040 = nv_rd32(priv, 0xc040);
+		if (shift_c040 != 14)
+			nv_wr32(priv, 0xc040, savedc040 & ~(3 << shift_c040));
+	}
+
+	if (oldramdac580 != ramdac580)
+		nv_wr32(priv, 0x680580, ramdac580);
+
+	if (!nv3035)
+		nv_wr32(priv, reg2, pll2);
+	nv_wr32(priv, reg1, pll1);
+
+	if (shift_powerctrl_1 >= 0)
+		nv_wr32(priv, 0x001584, saved_powerctrl_1);
+	if (chip_version >= 0x40)
+		nv_wr32(priv, 0xc040, savedc040);
+}
+
+static void
+setPLL_double_lowregs(struct nv04_clock_priv *priv, u32 NMNMreg,
+		      struct nouveau_pll_vals *pv)
+{
+	/* When setting PLLs, there is a merry game of disabling and enabling
+	 * various bits of hardware during the process. This function is a
+	 * synthesis of six nv4x traces, nearly each card doing a subtly
+	 * different thing. With luck all the necessary bits for each card are
+	 * combined herein. Without luck it deviates from each card's formula
+	 * so as to not work on any :)
+	 */
+
+	uint32_t Preg = NMNMreg - 4;
+	bool mpll = Preg == 0x4020;
+	uint32_t oldPval = nv_rd32(priv, Preg);
+	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
+	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
+			0xc << 28 | pv->log2P << 16;
+	uint32_t saved4600 = 0;
+	/* some cards have different maskc040s */
+	uint32_t maskc040 = ~(3 << 14), savedc040;
+	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
+
+	if (nv_rd32(priv, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+		return;
+
+	if (Preg == 0x4000)
+		maskc040 = ~0x333;
+	if (Preg == 0x4058)
+		maskc040 = ~(0xc << 24);
+
+	if (mpll) {
+		struct nvbios_pll info;
+		uint8_t Pval2;
+
+		if (nvbios_pll_parse(nouveau_bios(priv), Preg, &info))
+			return;
+
+		Pval2 = pv->log2P + info.bias_p;
+		if (Pval2 > info.max_p)
+			Pval2 = info.max_p;
+		Pval |= 1 << 28 | Pval2 << 20;
+
+		saved4600 = nv_rd32(priv, 0x4600);
+		nv_wr32(priv, 0x4600, saved4600 | 8 << 28);
+	}
+	if (single_stage)
+		Pval |= mpll ? 1 << 12 : 1 << 8;
+
+	nv_wr32(priv, Preg, oldPval | 1 << 28);
+	nv_wr32(priv, Preg, Pval & ~(4 << 28));
+	if (mpll) {
+		Pval |= 8 << 20;
+		nv_wr32(priv, 0x4020, Pval & ~(0xc << 28));
+		nv_wr32(priv, 0x4038, Pval & ~(0xc << 28));
+	}
+
+	savedc040 = nv_rd32(priv, 0xc040);
+	nv_wr32(priv, 0xc040, savedc040 & maskc040);
+
+	nv_wr32(priv, NMNMreg, NMNM);
+	if (NMNMreg == 0x4024)
+		nv_wr32(priv, 0x403c, NMNM);
+
+	nv_wr32(priv, Preg, Pval);
+	if (mpll) {
+		Pval &= ~(8 << 20);
+		nv_wr32(priv, 0x4020, Pval);
+		nv_wr32(priv, 0x4038, Pval);
+		nv_wr32(priv, 0x4600, saved4600);
+	}
+
+	nv_wr32(priv, 0xc040, savedc040);
+
+	if (mpll) {
+		nv_wr32(priv, 0x4020, Pval & ~(1 << 28));
+		nv_wr32(priv, 0x4038, Pval & ~(1 << 28));
+	}
+}
+
+int
+nv04_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
+{
+	struct nv04_clock_priv *priv = (void *)clk;
+	struct nouveau_pll_vals pv;
+	struct nvbios_pll info;
+	int ret;
+
+	ret = nvbios_pll_parse(nouveau_bios(priv), type > 0x405c ?
+			       type : type - 4, &info);
+	if (ret)
+		return ret;
+
+	ret = clk->pll_calc(clk, &info, freq, &pv);
+	if (!ret)
+		return ret;
+
+	return clk->pll_prog(clk, type, &pv);
+}
+
+int
+nv04_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+		    int clk, struct nouveau_pll_vals *pv)
+{
+	int N1, M1, N2, M2, P;
+	int ret = nv04_pll_calc(clock, info, clk, &N1, &M1, &N2, &M2, &P);
+	if (ret) {
+		pv->refclk = info->refclk;
+		pv->N1 = N1;
+		pv->M1 = M1;
+		pv->N2 = N2;
+		pv->M2 = M2;
+		pv->log2P = P;
+	}
+	return ret;
+}
+
+int
+nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
+		    struct nouveau_pll_vals *pv)
+{
+	struct nv04_clock_priv *priv = (void *)clk;
+	int cv = nouveau_bios(clk)->version.chip;
+
+	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
+	    cv >= 0x40) {
+		if (reg1 > 0x405c)
+			setPLL_double_highregs(priv, reg1, pv);
+		else
+			setPLL_double_lowregs(priv, reg1, pv);
+	} else
+		setPLL_single(priv, reg1, pv);
+
+	return 0;
+}
+
+static int
+nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv04_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nv04_clock_pll_set;
+	priv->base.pll_calc = nv04_clock_pll_calc;
+	priv->base.pll_prog = nv04_clock_pll_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index c82de98fee0e..a4b2b7ebf9af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,34 +22,38 @@
  * Authors: Ben Skeggs
  */
 
-#ifndef __NOUVEAU_RAMHT_H__
-#define __NOUVEAU_RAMHT_H__
+#include <subdev/clock.h>
 
-struct nouveau_ramht_entry {
-	struct list_head head;
-	struct nouveau_channel *channel;
-	struct nouveau_gpuobj *gpuobj;
-	u32 handle;
+struct nv40_clock_priv {
+	struct nouveau_clock base;
 };
 
-struct nouveau_ramht {
-	struct drm_device *dev;
-	struct kref refcount;
-	spinlock_t lock;
-	struct nouveau_gpuobj *gpuobj;
-	struct list_head entries;
-	int bits;
-};
+static int
+nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv40_clock_priv *priv;
+	int ret;
 
-extern int  nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
-			      struct nouveau_ramht **);
-extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
-			      struct nouveau_channel *unref_channel);
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-extern int  nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
-				 struct nouveau_gpuobj *);
-extern int  nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
-extern struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+	priv->base.pll_set = nv04_clock_pll_set;
+	priv->base.pll_calc = nv04_clock_pll_calc;
+	priv->base.pll_prog = nv04_clock_pll_prog;
+	return 0;
+}
 
-#endif
+struct nouveau_oclass
+nv40_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
new file mode 100644
index 000000000000..fd181fbceddb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nv50_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+nv50_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll info;
+	int N1, M1, N2, M2, P;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, type, &info);
+	if (ret) {
+		nv_error(clk, "failed to retrieve pll data, %d\n", ret);
+		return ret;
+	}
+
+	ret = nv04_pll_calc(clk, &info, freq, &N1, &M1, &N2, &M2, &P);
+	if (!ret) {
+		nv_error(clk, "failed pll calculation\n");
+		return ret;
+	}
+
+	switch (info.type) {
+	case PLL_VPLL0:
+	case PLL_VPLL1:
+		nv_wr32(priv, info.reg + 0, 0x10000611);
+		nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
+		nv_mask(priv, info.reg + 8, 0x7fff00ff, (P  << 28) |
+							(M2 << 16) | N2);
+		break;
+	case PLL_MEMORY:
+		nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) |
+						        (info.bias_p << 19) |
+							(P << 16));
+		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		break;
+	default:
+		nv_mask(priv, info.reg + 0, 0x00070000, (P << 16));
+		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv50_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nv50_clock_pll_set;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
new file mode 100644
index 000000000000..cc8d7d162d7c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nva3_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
+{
+	struct nva3_clock_priv *priv = (void *)clk;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll info;
+	int N, fN, M, P;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, type, &info);
+	if (ret)
+		return ret;
+
+	ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P);
+	if (ret < 0)
+		return ret;
+
+	switch (info.type) {
+	case PLL_VPLL0:
+	case PLL_VPLL1:
+		nv_wr32(priv, info.reg + 0, 0x50000610);
+		nv_mask(priv, info.reg + 4, 0x003fffff,
+					    (P << 16) | (M << 8) | N);
+		nv_wr32(priv, info.reg + 8, fN);
+		break;
+	default:
+		nv_warn(priv, "0x%08x/%dkHz unimplemented\n", type, freq);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nva3_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nva3_clock_pll_set;
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
new file mode 100644
index 000000000000..5ccce0b17bf3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nvc0_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
+{
+	struct nvc0_clock_priv *priv = (void *)clk;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll info;
+	int N, fN, M, P;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, type, &info);
+	if (ret)
+		return ret;
+
+	ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P);
+	if (ret < 0)
+		return ret;
+
+	switch (info.type) {
+	case PLL_VPLL0:
+	case PLL_VPLL1:
+		nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
+		nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
+		nv_wr32(priv, info.reg + 0x10, fN << 16);
+		break;
+	default:
+		nv_warn(priv, "0x%08x/%dkHz unimplemented\n", type, freq);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nvc0_clock_pll_set;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
new file mode 100644
index 000000000000..ef2c0078f337
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
@@ -0,0 +1,9 @@
+#ifndef __NOUVEAU_PLL_H__
+#define __NOUVEAU_PLL_H__
+
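+/* Both calculators return the rate (in kHz) actually achieved, with the
+ * chosen coefficients written back through the pointer arguments, or zero /
+ * a negative error code when no acceptable combination exists (see
+ * pllnv04.c and pllnva3.c).  Illustrative usage only, not lifted from a
+ * specific caller:
+ *
+ *	ret = nv04_pll_calc(clk, &info, freq, &N1, &M1, &N2, &M2, &P);
+ *	if (ret <= 0)
+ *		return ret ? ret : -EINVAL;
+ *	... program N1/M1/N2/M2/P into the PLL registers ...
+ */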
+int nv04_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq,
+		  int *N1, int *M1, int *N2, int *M2, int *P);
+int nva3_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq,
+		  int *N, int *fN, int *M, int *P);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
new file mode 100644
index 000000000000..a2ab6d051ba8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+static int
+getMNP_single(struct nouveau_clock *clock, struct nvbios_pll *info, int clk,
+	      int *pN, int *pM, int *pP)
+{
+	/* Find M, N and P for a single stage PLL
+	 *
+	 * Note that some BIOSes (NV3x) have lookup tables of precomputed MNP
+	 * values, but we don't make use of them at the moment
+	 *
+	 * "clk" parameter is in kHz
+	 * returns the calculated clock
+	 */
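+	/* Worked example with illustrative numbers (not taken from any bios
+	 * limits table): for a 27000 kHz crystal and a 135000 kHz target,
+	 * thisP = 2 (P = 4) puts the VCO at clkP = 540000 kHz, and M = 3,
+	 * N = 60 hits that exactly (27000 * 60 / 3 = 540000), giving
+	 * calcclk = 540000 / 4 = 135000 kHz and delta = 0.
+	 */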
+	int cv = nouveau_bios(clock)->version.chip;
+	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
+	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
+	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
+	int minU = info->vco1.min_inputfreq;
+	int maxU = info->vco1.max_inputfreq;
+	int minP = info->min_p;
+	int maxP = info->max_p_usable;
+	int crystal = info->refclk;
+	int M, N, thisP, P;
+	int clkP, calcclk;
+	int delta, bestdelta = INT_MAX;
+	int bestclk = 0;
+
+	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
+	/* possibly correlated with introduction of 27MHz crystal */
+	if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+		if (clk > 250000)
+			maxM = 6;
+		if (clk > 340000)
+			maxM = 2;
+	} else if (cv < 0x40) {
+		if (clk > 150000)
+			maxM = 6;
+		if (clk > 200000)
+			maxM = 4;
+		if (clk > 340000)
+			maxM = 2;
+	}
+
+	P = 1 << maxP;
+	if ((clk * P) < minvco) {
+		minvco = clk * maxP;
+		maxvco = minvco * 2;
+	}
+
+	if (clk + clk/200 > maxvco)	/* +0.5% */
+		maxvco = clk + clk/200;
+
+	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
+	for (thisP = minP; thisP <= maxP; thisP++) {
+		P = 1 << thisP;
+		clkP = clk * P;
+
+		if (clkP < minvco)
+			continue;
+		if (clkP > maxvco)
+			return bestclk;
+
+		for (M = minM; M <= maxM; M++) {
+			if (crystal/M < minU)
+				return bestclk;
+			if (crystal/M > maxU)
+				continue;
+
+			/* add crystal/2 to round better */
+			N = (clkP * M + crystal/2) / crystal;
+
+			if (N < minN)
+				continue;
+			if (N > maxN)
+				break;
+
+			/* more rounding additions */
+			calcclk = ((N * crystal + P/2) / P + M/2) / M;
+			delta = abs(calcclk - clk);
+			/* we do an exhaustive search rather than terminating
+			 * on an optimality condition...
+			 */
+			if (delta < bestdelta) {
+				bestdelta = delta;
+				bestclk = calcclk;
+				*pN = N;
+				*pM = M;
+				*pP = thisP;
+				if (delta == 0)	/* except this one */
+					return bestclk;
+			}
+		}
+	}
+
+	return bestclk;
+}
+
+static int
+getMNP_double(struct nouveau_clock *clock, struct nvbios_pll *info, int clk,
+	      int *pN1, int *pM1, int *pN2, int *pM2, int *pP)
+{
+	/* Find M, N and P for a two stage PLL
+	 *
+	 * Note that some BIOSes (NV30+) have lookup tables of precomputed MNP
+	 * values, but we don't make use of them at the moment
+	 *
+	 * "clk" parameter is in kHz
+	 * returns the calculated clock
+	 */
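+	/* Worked example with illustrative numbers (assuming the vco and
+	 * input frequency limits permit them): the output is
+	 * crystal * (N1/M1) * (N2/M2) >> log2P, so a 27000 kHz crystal
+	 * with N1/M1 = 4/1 (first vco at 108000 kHz), N2/M2 = 10/2
+	 * (second vco at 540000 kHz) and log2P = 2 gives 135000 kHz.
+	 */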
+	int chip_version = nouveau_bios(clock)->version.chip;
+	int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
+	int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
+	int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
+	int maxU1 = info->vco1.max_inputfreq, maxU2 = info->vco2.max_inputfreq;
+	int minM1 = info->vco1.min_m, maxM1 = info->vco1.max_m;
+	int minN1 = info->vco1.min_n, maxN1 = info->vco1.max_n;
+	int minM2 = info->vco2.min_m, maxM2 = info->vco2.max_m;
+	int minN2 = info->vco2.min_n, maxN2 = info->vco2.max_n;
+	int maxlog2P = info->max_p_usable;
+	int crystal = info->refclk;
+	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
+	int M1, N1, M2, N2, log2P;
+	int clkP, calcclk1, calcclk2, calcclkout;
+	int delta, bestdelta = INT_MAX;
+	int bestclk = 0;
+
+	int vco2 = (maxvco2 - maxvco2/200) / 2;
+	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
+		;
+	clkP = clk << log2P;
+
+	if (maxvco2 < clk + clk/200)	/* +0.5% */
+		maxvco2 = clk + clk/200;
+
+	for (M1 = minM1; M1 <= maxM1; M1++) {
+		if (crystal/M1 < minU1)
+			return bestclk;
+		if (crystal/M1 > maxU1)
+			continue;
+
+		for (N1 = minN1; N1 <= maxN1; N1++) {
+			calcclk1 = crystal * N1 / M1;
+			if (calcclk1 < minvco1)
+				continue;
+			if (calcclk1 > maxvco1)
+				break;
+
+			for (M2 = minM2; M2 <= maxM2; M2++) {
+				if (calcclk1/M2 < minU2)
+					break;
+				if (calcclk1/M2 > maxU2)
+					continue;
+
+				/* add calcclk1/2 to round better */
+				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
+				if (N2 < minN2)
+					continue;
+				if (N2 > maxN2)
+					break;
+
+				if (!fixedgain2) {
+					if (chip_version < 0x60)
+						if (N2/M2 < 4 || N2/M2 > 10)
+							continue;
+
+					calcclk2 = calcclk1 * N2 / M2;
+					if (calcclk2 < minvco2)
+						break;
+					if (calcclk2 > maxvco2)
+						continue;
+				} else
+					calcclk2 = calcclk1;
+
+				calcclkout = calcclk2 >> log2P;
+				delta = abs(calcclkout - clk);
+				/* we do an exhaustive search rather than terminating
+				 * on an optimality condition...
+				 */
+				if (delta < bestdelta) {
+					bestdelta = delta;
+					bestclk = calcclkout;
+					*pN1 = N1;
+					*pM1 = M1;
+					*pN2 = N2;
+					*pM2 = M2;
+					*pP = log2P;
+					if (delta == 0)	/* except this one */
+						return bestclk;
+				}
+			}
+		}
+	}
+
+	return bestclk;
+}
+
+int
+nv04_pll_calc(struct nouveau_clock *clk, struct nvbios_pll *info, u32 freq,
+	      int *N1, int *M1, int *N2, int *M2, int *P)
+{
+	int ret;
+
+	if (!info->vco2.max_freq) {
+		ret = getMNP_single(clk, info, freq, N1, M1, P);
+		*N2 = 1;
+		*M2 = 1;
+	} else {
+		ret = getMNP_double(clk, info, freq, N1, M1, N2, M2, P);
+	}
+
+	if (!ret)
+		nv_error(clk, "unable to compute acceptable pll values\n");
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
index 4d019eb76f7d..eed5c16cf610 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -22,60 +22,43 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
 
-int
-nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
-	      int *N1, int *M1, int *N2, int *M2, int *P)
-{
-	struct nouveau_pll_vals pll_vals;
-	int ret;
-
-	ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
-	if (ret <= 0)
-		return ret;
-
-	*N1 = pll_vals.N1;
-	*M1 = pll_vals.M1;
-	*N2 = pll_vals.N2;
-	*M2 = pll_vals.M2;
-	*P = pll_vals.log2P;
-	return ret;
-}
+#include "pll.h"
 
 int
-nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
-	      int *pN, int *pfN, int *pM, int *P)
+nva3_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+	      u32 freq, int *pN, int *pfN, int *pM, int *P)
 {
 	u32 best_err = ~0, err;
 	int M, lM, hM, N, fN;
 
-	*P = pll->vco1.maxfreq / clk;
-	if (*P > pll->max_p)
-		*P = pll->max_p;
-	if (*P < pll->min_p)
-		*P = pll->min_p;
+	*P = info->vco1.max_freq / freq;
+	if (*P > info->max_p)
+		*P = info->max_p;
+	if (*P < info->min_p)
+		*P = info->min_p;
 
-	lM = (pll->refclk + pll->vco1.max_inputfreq) / pll->vco1.max_inputfreq;
-	lM = max(lM, (int)pll->vco1.min_m);
-	hM = (pll->refclk + pll->vco1.min_inputfreq) / pll->vco1.min_inputfreq;
-	hM = min(hM, (int)pll->vco1.max_m);
+	lM = (info->refclk + info->vco1.max_inputfreq) / info->vco1.max_inputfreq;
+	lM = max(lM, (int)info->vco1.min_m);
+	hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
+	hM = min(hM, (int)info->vco1.max_m);
 
 	for (M = lM; M <= hM; M++) {
-		u32 tmp = clk * *P * M;
-		N  = tmp / pll->refclk;
-		fN = tmp % pll->refclk;
-		if (!pfN && fN >= pll->refclk / 2)
+		u32 tmp = freq * *P * M;
+		N  = tmp / info->refclk;
+		fN = tmp % info->refclk;
+		if (!pfN && fN >= info->refclk / 2)
 			N++;
 
-		if (N < pll->vco1.min_n)
+		if (N < info->vco1.min_n)
 			continue;
-		if (N > pll->vco1.max_n)
+		if (N > info->vco1.max_n)
 			break;
 
-		err = abs(clk - (pll->refclk * N / M / *P));
+		err = abs(freq - (info->refclk * N / M / *P));
 		if (err < best_err) {
 			best_err = err;
 			*pN = N;
@@ -83,15 +66,15 @@ nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
 		}
 
 		if (pfN) {
-			*pfN = (((fN << 13) / pll->refclk) - 4096) & 0xffff;
-			return clk;
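+			/* purely an arithmetic reading of the encoding
+			 * below: the remainder fN is scaled to 1/8192ths
+			 * of the reference clock, offset so that exactly
+			 * refclk/2 encodes as 0 (smaller remainders go
+			 * negative, larger positive), and masked to 16 bits.
+			 */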
+			*pfN = (((fN << 13) / info->refclk) - 4096) & 0xffff;
+			return freq;
 		}
 	}
 
 	if (unlikely(best_err == ~0)) {
-		NV_ERROR(dev, "unable to find matching pll values\n");
+		nv_error(clock, "unable to find matching pll values\n");
 		return -EINVAL;
 	}
 
-	return pll->refclk * *pN / *pM / *P;
+	return info->refclk * *pN / *pM / *P;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
new file mode 100644
index 000000000000..ca9a4648bd8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/device.h>
+#include <core/client.h>
+#include <core/option.h>
+
+#include <core/class.h>
+
+#include <subdev/device.h>
+
+static DEFINE_MUTEX(nv_devices_mutex);
+static LIST_HEAD(nv_devices);
+
+struct nouveau_device *
+nouveau_device_find(u64 name)
+{
+	struct nouveau_device *device, *match = NULL;
+	mutex_lock(&nv_devices_mutex);
+	list_for_each_entry(device, &nv_devices, head) {
+		if (device->handle == name) {
+			match = device;
+			break;
+		}
+	}
+	mutex_unlock(&nv_devices_mutex);
+	return match;
+}
+
+/******************************************************************************
+ * nouveau_devobj (0x0080): class implementation
+ *****************************************************************************/
+struct nouveau_devobj {
+	struct nouveau_parent base;
+	struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+	bool created;
+};
+
+static const u64 disable_map[] = {
+	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_DISABLE_VBIOS,
+	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_MC]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_FB]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_VM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_CLOCK]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_ENGINE_GR]	= NV_DEVICE_DISABLE_GRAPH,
+	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_DISABLE_MPEG,
+	[NVDEV_ENGINE_ME]	= NV_DEVICE_DISABLE_ME,
+	[NVDEV_ENGINE_VP]	= NV_DEVICE_DISABLE_VP,
+	[NVDEV_ENGINE_CRYPT]	= NV_DEVICE_DISABLE_CRYPT,
+	[NVDEV_ENGINE_BSP]	= NV_DEVICE_DISABLE_BSP,
+	[NVDEV_ENGINE_PPP]	= NV_DEVICE_DISABLE_PPP,
+	[NVDEV_ENGINE_COPY0]	= NV_DEVICE_DISABLE_COPY0,
+	[NVDEV_ENGINE_COPY1]	= NV_DEVICE_DISABLE_COPY1,
+	[NVDEV_ENGINE_UNK1C1]	= NV_DEVICE_DISABLE_UNK1C1,
+	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_DISABLE_FIFO,
+	[NVDEV_ENGINE_DISP]	= NV_DEVICE_DISABLE_DISP,
+	[NVDEV_SUBDEV_NR]	= 0,
+};
+
+static int
+nouveau_devobj_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_client *client = nv_client(parent);
+	struct nouveau_device *device;
+	struct nouveau_devobj *devobj;
+	struct nv_device_class *args = data;
+	u64 disable, boot0, strap;
+	u64 mmio_base, mmio_size;
+	void __iomem *map;
+	int ret, i, c;
+
+	if (size < sizeof(struct nv_device_class))
+		return -EINVAL;
+
+	/* find the device subdev that matches what the client requested */
+	device = nv_device(client->device);
+	if (args->device != ~0) {
+		device = nouveau_device_find(args->device);
+		if (!device)
+			return -ENODEV;
+	}
+
+	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
+				    (1ULL << NVDEV_ENGINE_DMAOBJ) |
+				    (1ULL << NVDEV_ENGINE_FIFO) |
+				    (1ULL << NVDEV_ENGINE_DISP), &devobj);
+	*pobject = nv_object(devobj);
+	if (ret)
+		return ret;
+
+	mmio_base = pci_resource_start(device->pdev, 0);
+	mmio_size = pci_resource_len(device->pdev, 0);
+
+	/* translate api disable mask into internal mapping */
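+	/* e.g. a client passing NV_DEVICE_DISABLE_MPEG in args->disable
+	 * causes the NVDEV_ENGINE_MPEG bit to be set here, and that engine
+	 * is then skipped when the subdevs are instantiated below.
+	 */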
+	disable = args->debug0;
+	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+		if (args->disable & disable_map[i])
+			disable |= (1ULL << i);
+	}
+
+	/* identify the chipset, and determine classes of subdev/engines */
+	if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
+	    !device->card_type) {
+		map = ioremap(mmio_base, 0x102000);
+		if (map == NULL)
+			return -ENOMEM;
+
+		/* switch mmio to cpu's native endianness */
+#ifndef __BIG_ENDIAN
+		if (ioread32_native(map + 0x000004) != 0x00000000)
+#else
+		if (ioread32_native(map + 0x000004) == 0x00000000)
+#endif
+			iowrite32_native(0x01000001, map + 0x000004);
+
+		/* read boot0 and strapping information */
+		boot0 = ioread32_native(map + 0x000000);
+		strap = ioread32_native(map + 0x101000);
+		iounmap(map);
+
+		/* determine chipset and derive architecture from it */
+		if ((boot0 & 0x0f000000) > 0) {
+			device->chipset = (boot0 & 0xff00000) >> 20;
+			switch (device->chipset & 0xf0) {
+			case 0x10: device->card_type = NV_10; break;
+			case 0x20: device->card_type = NV_20; break;
+			case 0x30: device->card_type = NV_30; break;
+			case 0x40:
+			case 0x60: device->card_type = NV_40; break;
+			case 0x50:
+			case 0x80:
+			case 0x90:
+			case 0xa0: device->card_type = NV_50; break;
+			case 0xc0: device->card_type = NV_C0; break;
+			case 0xd0: device->card_type = NV_D0; break;
+			case 0xe0: device->card_type = NV_E0; break;
+			default:
+				break;
+			}
+		} else
+		if ((boot0 & 0xff00fff0) == 0x20004000) {
+			if (boot0 & 0x00f00000)
+				device->chipset = 0x05;
+			else
+				device->chipset = 0x04;
+			device->card_type = NV_04;
+		}
+
+		switch (device->card_type) {
+		case NV_04: ret = nv04_identify(device); break;
+		case NV_10: ret = nv10_identify(device); break;
+		case NV_20: ret = nv20_identify(device); break;
+		case NV_30: ret = nv30_identify(device); break;
+		case NV_40: ret = nv40_identify(device); break;
+		case NV_50: ret = nv50_identify(device); break;
+		case NV_C0:
+		case NV_D0: ret = nvc0_identify(device); break;
+		case NV_E0: ret = nve0_identify(device); break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret) {
+			nv_error(device, "unknown chipset, 0x%08x\n", boot0);
+			return ret;
+		}
+
+		nv_info(device, "BOOT0  : 0x%08x\n", boot0);
+		nv_info(device, "Chipset: %s (NV%02X)\n",
+			device->cname, device->chipset);
+		nv_info(device, "Family : NV%02X\n", device->card_type);
+
+		/* determine frequency of timing crystal */
+		if ( device->chipset < 0x17 ||
+		    (device->chipset >= 0x20 && device->chipset <= 0x25))
+			strap &= 0x00000040;
+		else
+			strap &= 0x00400040;
+
+		switch (strap) {
+		case 0x00000000: device->crystal = 13500; break;
+		case 0x00000040: device->crystal = 14318; break;
+		case 0x00400000: device->crystal = 27000; break;
+		case 0x00400040: device->crystal = 25000; break;
+		}
+
+		nv_debug(device, "crystal freq: %dkHz\n", device->crystal);
+	}
+
+	if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
+	    !nv_subdev(device)->mmio) {
+		nv_subdev(device)->mmio  = ioremap(mmio_base, mmio_size);
+		if (!nv_subdev(device)->mmio) {
+			nv_error(device, "unable to map device registers\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* ensure requested subsystems are available for use */
+	for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) {
+		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
+			continue;
+
+		if (!device->subdev[i]) {
+			ret = nouveau_object_ctor(nv_object(device), NULL,
+						  oclass, NULL, i,
+						  &devobj->subdev[i]);
+			if (ret == -ENODEV)
+				continue;
+			if (ret)
+				return ret;
+
+			if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
+				nouveau_subdev_reset(devobj->subdev[i]);
+		} else {
+			nouveau_object_ref(device->subdev[i],
+					  &devobj->subdev[i]);
+		}
+
+		/* note: can't init *any* subdevs until devinit has been run
+		 * due to not knowing exactly what the vbios init tables will
+		 * mess with.  devinit also can't be run until all of its
+		 * dependencies have been created.
+		 *
+		 * this code delays init of any subdev until all of devinit's
+		 * dependencies have been created, and then initialises each
+		 * subdev in turn as they're created.
+		 */
+		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
+			struct nouveau_object *subdev = devobj->subdev[c++];
+			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
+				ret = nouveau_object_inc(subdev);
+				if (ret)
+					return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void
+nouveau_devobj_dtor(struct nouveau_object *object)
+{
+	struct nouveau_devobj *devobj = (void *)object;
+	int i;
+
+	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
+		nouveau_object_ref(NULL, &devobj->subdev[i]);
+
+	nouveau_parent_destroy(&devobj->base);
+}
+
+static int
+nouveau_devobj_init(struct nouveau_object *object)
+{
+	struct nouveau_devobj *devobj = (void *)object;
+	struct nouveau_object *subdev;
+	int ret, i;
+
+	ret = nouveau_parent_init(&devobj->base);
+	if (ret)
+		return ret;
+
+	for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
+		if ((subdev = devobj->subdev[i])) {
+			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+				ret = nouveau_object_inc(subdev);
+				if (ret)
+					goto fail;
+			}
+		}
+	}
+
+	devobj->created = true;
+	return 0;
+
+fail:
+	for (--i; i >= 0; i--) {
+		if ((subdev = devobj->subdev[i])) {
+			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
+				nouveau_object_dec(subdev, false);
+		}
+	}
+
+	return ret;
+}
+
+static int
+nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_devobj *devobj = (void *)object;
+	struct nouveau_object *subdev;
+	int ret, i;
+
+	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
+		if ((subdev = devobj->subdev[i])) {
+			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+				ret = nouveau_object_dec(subdev, suspend);
+				if (ret && suspend)
+					goto fail;
+			}
+		}
+	}
+
+	ret = nouveau_parent_fini(&devobj->base, suspend);
+fail:
+	for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
+		if ((subdev = devobj->subdev[i])) {
+			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+				ret = nouveau_object_inc(subdev);
+				if (ret) {
+					/* XXX */
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+static u8
+nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
+{
+	return nv_rd08(object->engine, addr);
+}
+
+static u16
+nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
+{
+	return nv_rd16(object->engine, addr);
+}
+
+static u32
+nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
+{
+	return nv_rd32(object->engine, addr);
+}
+
+static void
+nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
+{
+	nv_wr08(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
+{
+	nv_wr16(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	nv_wr32(object->engine, addr, data);
+}
+
+static struct nouveau_ofuncs
+nouveau_devobj_ofuncs = {
+	.ctor = nouveau_devobj_ctor,
+	.dtor = nouveau_devobj_dtor,
+	.init = nouveau_devobj_init,
+	.fini = nouveau_devobj_fini,
+	.rd08 = nouveau_devobj_rd08,
+	.rd16 = nouveau_devobj_rd16,
+	.rd32 = nouveau_devobj_rd32,
+	.wr08 = nouveau_devobj_wr08,
+	.wr16 = nouveau_devobj_wr16,
+	.wr32 = nouveau_devobj_wr32,
+};
+
+/******************************************************************************
+ * nouveau_device: engine functions
+ *****************************************************************************/
+struct nouveau_oclass
+nouveau_device_sclass[] = {
+	{ 0x0080, &nouveau_devobj_ofuncs },
+	{}
+};
+
+static void
+nouveau_device_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = (void *)object;
+
+	mutex_lock(&nv_devices_mutex);
+	list_del(&device->head);
+	mutex_unlock(&nv_devices_mutex);
+
+	if (device->base.mmio)
+		iounmap(device->base.mmio);
+
+	nouveau_subdev_destroy(&device->base);
+}
+
+static struct nouveau_oclass
+nouveau_device_oclass = {
+	.handle = NV_SUBDEV(DEVICE, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.dtor = nouveau_device_dtor,
+	},
+};
+
+int
+nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
+		       const char *cfg, const char *dbg,
+		       int length, void **pobject)
+{
+	struct nouveau_device *device;
+	int ret = -EEXIST;
+
+	mutex_lock(&nv_devices_mutex);
+	list_for_each_entry(device, &nv_devices, head) {
+		if (device->handle == name)
+			goto done;
+	}
+
+	ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0,
+				     "DEVICE", "device", length, pobject);
+	device = *pobject;
+	if (ret)
+		goto done;
+
+	atomic_set(&nv_object(device)->usecount, 2);
+	device->pdev = pdev;
+	device->handle = name;
+	device->cfgopt = cfg;
+	device->dbgopt = dbg;
+	device->name = sname;
+
+	nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
+	list_add(&device->head, &nv_devices);
+done:
+	mutex_unlock(&nv_devices_mutex);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
new file mode 100644
index 000000000000..8626d0d6cbbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/device.h>
+#include <subdev/bios.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv04_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x04:
+		device->cname = "NV04";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x05:
+		device->cname = "NV05";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown RIVA chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
new file mode 100644
index 000000000000..f09accfd0e31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/device.h>
+#include <subdev/bios.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv10_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x10:
+		device->cname = "NV10";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x15:
+		device->cname = "NV15";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x16:
+		device->cname = "NV16";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x1a:
+		device->cname = "nForce";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x11:
+		device->cname = "NV11";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x17:
+		device->cname = "NV17";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x1f:
+		device->cname = "nForce2";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x18:
+		device->cname = "NV18";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Celsius chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
new file mode 100644
index 000000000000..5fa58b7369b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/device.h>
+#include <subdev/bios.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv20_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x20:
+		device->cname = "NV20";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x25:
+		device->cname = "NV25";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x28:
+		device->cname = "NV28";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x2a:
+		device->cname = "NV2A";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Kelvin chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
new file mode 100644
index 000000000000..7f4b8fe6cccc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/device.h>
+#include <subdev/bios.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
+int
+nv30_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x30:
+		device->cname = "NV30";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x35:
+		device->cname = "NV35";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x31:
+		device->cname = "NV31";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x36:
+		device->cname = "NV36";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x34:
+		device->cname = "NV34";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Rankine chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
new file mode 100644
index 000000000000..42deadca0f0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/device.h>
+#include <subdev/bios.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
+int
+nv40_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x40:
+		device->cname = "NV40";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x41:
+		device->cname = "NV41";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x42:
+		device->cname = "NV42";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x43:
+		device->cname = "NV43";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x45:
+		device->cname = "NV45";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x47:
+		device->cname = "G70";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x49:
+		device->cname = "G71";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x4b:
+		device->cname = "G73";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x44:
+		device->cname = "NV44";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x46:
+		device->cname = "G72";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x4a:
+		device->cname = "NV44A";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x4c:
+		device->cname = "C61";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x4e:
+		device->cname = "C51";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x63:
+		device->cname = "C73";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x67:
+		device->cname = "C67";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x68:
+		device->cname = "C68";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Curie chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
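
Within the Curie switch above, the cases differ only in a few slots (the MC and VM classes and the marketing name); everything else is the shared NV4x set. That structure is easier to see when factored as a baseline plus per-chipset overrides. The following is only an illustration of that layout with invented types and a reduced chipset list, not a proposed change to the patch:

#include <stdio.h>

struct fake_oclass { const char *name; };	/* stand-in for the real oclass pointers */

static const struct fake_oclass nv04_mc = { "nv04_mc" };
static const struct fake_oclass nv44_mc = { "nv44_mc" };
static const struct fake_oclass nv04_vm = { "nv04_vmmgr" };
static const struct fake_oclass nv41_vm = { "nv41_vmmgr" };
static const struct fake_oclass nv44_vm = { "nv44_vmmgr" };

struct fake_nv40_cfg {
	const char *cname;
	const struct fake_oclass *mc;
	const struct fake_oclass *vm;
};

static int fake_nv40_fill(struct fake_nv40_cfg *cfg, unsigned int chipset)
{
	/* Common NV4x baseline... */
	cfg->mc = &nv04_mc;
	cfg->vm = &nv41_vm;

	/* ...with the per-chipset deviations layered on top
	 * (only a subset of the chipsets is shown). */
	switch (chipset) {
	case 0x40: cfg->cname = "NV40"; cfg->vm = &nv04_vm; break;
	case 0x45: cfg->cname = "NV45"; cfg->vm = &nv04_vm; break;
	case 0x41: cfg->cname = "NV41"; break;
	case 0x44: cfg->cname = "NV44"; cfg->mc = &nv44_mc; cfg->vm = &nv44_vm; break;
	case 0x4e: cfg->cname = "C51";  cfg->mc = &nv44_mc; cfg->vm = &nv44_vm; break;
	default:   return -1;
	}
	return 0;
}

int main(void)
{
	struct fake_nv40_cfg cfg = { 0 };

	if (fake_nv40_fill(&cfg, 0x44) == 0)
		printf("%s: mc=%s, vm=%s\n", cfg.cname, cfg.mc->name, cfg.vm->name);
	return 0;
}

The same observation applies to the Tesla, Fermi and Kepler tables below, where only the FIFO, MC, CLOCK and engine slots vary between neighbouring chipsets.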
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
new file mode 100644
index 000000000000..fec3bcc9a6fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/device.h>
+#include <subdev/bios.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/vp.h>
+#include <engine/crypt.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+
+int
+nv50_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x50:
+		device->cname = "G80";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv50_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0x84:
+		device->cname = "G84";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0x86:
+		device->cname = "G86";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0x92:
+		device->cname = "G92";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0x94:
+		device->cname = "G94";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0x96:
+		device->cname = "G96";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0x98:
+		device->cname = "G98";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xa0:
+		device->cname = "G200";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xaa:
+		device->cname = "MCP77/MCP78";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xac:
+		device->cname = "MCP79/MCP7A";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xa3:
+		device->cname = "GT215";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xa5:
+		device->cname = "GT216";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xa8:
+		device->cname = "GT218";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xaf:
+		device->cname = "MCP89";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Tesla chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
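
Each Tesla case above populates device->oclass[] keyed by NVDEV_SUBDEV_* / NVDEV_ENGINE_* indices, leaving units a chipset lacks (for example MPEG on G98, MCP77/MCP78 and MCP79/MCP7A) unset. Code outside this diff is expected to walk that table and instantiate only the populated slots; a minimal standalone model of such a walk, with all types and the constructor callback invented for the example, is:

#include <stdio.h>

enum fake_devidx {
	FAKE_SUBDEV_VBIOS,
	FAKE_SUBDEV_FB,
	FAKE_ENGINE_GR,
	FAKE_ENGINE_DISP,
	FAKE_DEVIDX_NR
};

struct fake_oclass {
	const char *name;
	int (*ctor)(const struct fake_oclass *);
};

static int fake_ctor(const struct fake_oclass *oclass)
{
	printf("creating %s\n", oclass->name);
	return 0;
}

static const struct fake_oclass fake_fb_oclass   = { "fb",   fake_ctor };
static const struct fake_oclass fake_gr_oclass   = { "gr",   fake_ctor };
static const struct fake_oclass fake_disp_oclass = { "disp", fake_ctor };

int main(void)
{
	/* Sparse per-chipset table, in the spirit of device->oclass[];
	 * the VBIOS slot is deliberately left NULL here. */
	const struct fake_oclass *oclass[FAKE_DEVIDX_NR] = {
		[FAKE_SUBDEV_FB]   = &fake_fb_oclass,
		[FAKE_ENGINE_GR]   = &fake_gr_oclass,
		[FAKE_ENGINE_DISP] = &fake_disp_oclass,
	};
	int i, ret;

	for (i = 0; i < FAKE_DEVIDX_NR; i++) {
		if (!oclass[i])
			continue;	/* this chipset has no such unit */
		ret = oclass[i]->ctor(oclass[i]);
		if (ret)
			return ret;
	}
	return 0;
}

A NULL slot simply means the chipset has no such engine or subdev, so the walk skips it rather than treating it as an error.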
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
new file mode 100644
index 000000000000..6697f0f9c293
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/device.h>
+#include <subdev/bios.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/vp.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+
+int
+nvc0_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0xc0:
+		device->cname = "GF100";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xc4:
+		device->cname = "GF104";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xc3:
+		device->cname = "GF106";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xce:
+		device->cname = "GF114";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xcf:
+		device->cname = "GF116";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xc1:
+		device->cname = "GF108";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xc8:
+		device->cname = "GF110";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0xd9:
+		device->cname = "GF119";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Fermi chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
new file mode 100644
index 000000000000..4a280b7ab853
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/device.h>
+#include <subdev/bios.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+#include <engine/copy.h>
+
+int
+nve0_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0xe4:
+		device->cname = "GK104";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		break;
+	case 0xe7:
+		device->cname = "GK107";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Kepler chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
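
The identify() routines in these new device files only fill in a table of class pointers, one per subdev/engine slot; the device core presumably instantiates each entry from that table later. A minimal userspace sketch of the pattern (all names and values below are invented for illustration and are not part of the driver):

#include <errno.h>
#include <stdio.h>

enum { SUB_FIFO, SUB_GR, SUB_DISP, SUB_NR };

struct toy_device {
	const char *cname;
	const char *oclass[SUB_NR];
};

static int
toy_identify(struct toy_device *dev, int chipset)
{
	switch (chipset) {
	case 0xe4:				/* mirrors the GK104 case above */
		dev->cname = "GK104";
		dev->oclass[SUB_FIFO] = "nve0_fifo";
		dev->oclass[SUB_GR]   = "nve0_graph";
		dev->oclass[SUB_DISP] = "nvd0_disp";
		break;
	default:				/* mirrors the nv_fatal() path  */
		fprintf(stderr, "unknown chipset 0x%02x\n", chipset);
		return -EINVAL;
	}
	return 0;
}

int
main(void)
{
	struct toy_device dev;

	if (!toy_identify(&dev, 0xe4))
		printf("%s: GR driven by %s\n", dev.cname, dev.oclass[SUB_GR]);
	return 0;
}

Adding support for a further chipset then amounts to adding another case with the appropriate oclass pointers, as the 0xe4/0xe7 pair above demonstrates.
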
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 1847963e27f3..5a07a39c1735 100644
--- a/drivers/gpu/drm/nouveau/nv98_ppp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,57 +22,48 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
+#include <core/option.h>
 
-struct nv98_ppp_engine {
-	struct nouveau_exec_engine base;
-};
+#include <subdev/devinit.h>
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
 
-static int
-nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+int
+nouveau_devinit_init(struct nouveau_devinit *devinit)
 {
-	if (!(nv_rd32(dev, 0x000200) & 0x00000002))
-		return 0;
+	int ret = nouveau_subdev_init(&devinit->base);
+	if (ret)
+		return ret;
 
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
-	return 0;
+	return nvbios_init(&devinit->base, devinit->post);
 }
 
-static int
-nv98_ppp_init(struct drm_device *dev, int engine)
-{
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
-	return 0;
-}
-
-static void
-nv98_ppp_destroy(struct drm_device *dev, int engine)
+int
+nouveau_devinit_fini(struct nouveau_devinit *devinit, bool suspend)
 {
-	struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+	/* force full reinit on resume */
+	if (suspend)
+		devinit->post = true;
 
-	NVOBJ_ENGINE_DEL(dev, PPP);
-
-	kfree(pppp);
+	return nouveau_subdev_fini(&devinit->base, suspend);
 }
 
 int
-nv98_ppp_create(struct drm_device *dev)
+nouveau_devinit_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int size, void **pobject)
 {
-	struct nv98_ppp_engine *pppp;
-
-	pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
-	if (!pppp)
-		return -ENOMEM;
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_devinit *devinit;
+	int ret;
 
-	pppp->base.destroy = nv98_ppp_destroy;
-	pppp->base.init = nv98_ppp_init;
-	pppp->base.fini = nv98_ppp_fini;
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
+				     "init", size, pobject);
+	devinit = *pobject;
+	if (ret)
+		return ret;
 
-	NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
+	devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
 	return 0;
 }
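
The three functions above define the whole life cycle of the post flag: the constructor seeds it from the "NvForcePost" configuration option, fini() forces it on across a suspend, and init() hands it to nvbios_init() to decide whether the VBIOS init scripts are executed. A userspace toy of that state machine (names invented, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct toy_devinit {
	bool post;
};

static void
toy_init(struct toy_devinit *d)
{
	printf("init: %s VBIOS init scripts\n", d->post ? "running" : "skipping");
}

static void
toy_fini(struct toy_devinit *d, bool suspend)
{
	if (suspend)
		d->post = true;		/* force full reinit on resume */
}

int
main(void)
{
	struct toy_devinit d = { .post = false };	/* NvForcePost unset */

	toy_init(&d);		/* boot: board already posted, skip */
	toy_fini(&d, true);	/* suspend                          */
	toy_init(&d);		/* resume: scripts are re-run       */
	return 0;
}
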
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
new file mode 100644
index 000000000000..6b56a0f4cb40
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define NV04_PFB_BOOT_0						0x00100000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB			0x00000000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB			0x00000001
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB			0x00000002
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_WIDTH_128			0x00000004
+#	define NV04_PFB_BOOT_0_RAM_TYPE				0x00000028
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT		0x00000000
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT		0x00000008
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK	0x00000010
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT		0x00000018
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT		0x00000020
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16		0x00000028
+#	define NV04_PFB_BOOT_0_UMA_ENABLE			0x00000100
+#	define NV04_PFB_BOOT_0_UMA_SIZE				0x0000f000
+#define NV04_PFB_DEBUG_0					0x00100080
+#	define NV04_PFB_DEBUG_0_PAGE_MODE			0x00000001
+#	define NV04_PFB_DEBUG_0_REFRESH_OFF			0x00000010
+#	define NV04_PFB_DEBUG_0_REFRESH_COUNTX64		0x00003f00
+#	define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK		0x00004000
+#	define NV04_PFB_DEBUG_0_SAFE_MODE			0x00008000
+#	define NV04_PFB_DEBUG_0_ALOM_ENABLE			0x00010000
+#	define NV04_PFB_DEBUG_0_CASOE				0x00100000
+#	define NV04_PFB_DEBUG_0_CKE_INVERT			0x10000000
+#	define NV04_PFB_DEBUG_0_REFINC				0x20000000
+#	define NV04_PFB_DEBUG_0_SAVE_POWER_OFF			0x40000000
+#define NV04_PFB_CFG0						0x00100200
+#	define NV04_PFB_CFG0_SCRAMBLE				0x20000000
+#define NV04_PFB_CFG1						0x00100204
+#define NV04_PFB_SCRAMBLE(i)                         (0x00100400 + 4 * (i))
+
+#define NV10_PFB_REFCTRL					0x00100210
+#	define NV10_PFB_REFCTRL_VALID_1				(1 << 31)
+
+static inline struct io_mapping *
+fbmem_init(struct pci_dev *pdev)
+{
+	return io_mapping_create_wc(pci_resource_start(pdev, 1),
+				    pci_resource_len(pdev, 1));
+}
+
+static inline void
+fbmem_fini(struct io_mapping *fb)
+{
+	io_mapping_free(fb);
+}
+
+static inline u32
+fbmem_peek(struct io_mapping *fb, u32 off)
+{
+	u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
+	u32 val = ioread32(p + (off & ~PAGE_MASK));
+	io_mapping_unmap_atomic(p);
+	return val;
+}
+
+static inline void
+fbmem_poke(struct io_mapping *fb, u32 off, u32 val)
+{
+	u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
+	iowrite32(val, p + (off & ~PAGE_MASK));
+	wmb();
+	io_mapping_unmap_atomic(p);
+}
+
+static inline bool
+fbmem_readback(struct io_mapping *fb, u32 off, u32 val)
+{
+	fbmem_poke(fb, off, val);
+	return val == fbmem_peek(fb, off);
+}
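
These helpers are what the chipset-specific meminit() routines below use to poke test patterns into the framebuffer aperture and see whether they stick. A minimal sketch of a caller (hypothetical function, assumes the kernel headers the meminit files already include):

static bool
probe_fb_offset(struct pci_dev *pdev, u32 off)
{
	struct io_mapping *fb = fbmem_init(pdev);
	bool backed = false;

	if (fb) {
		/* write a pattern at 'off' and check it reads back */
		backed = fbmem_readback(fb, off, 0xdeadbeef);
		fbmem_fini(fb);
	}
	return backed;
}
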
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
new file mode 100644
index 000000000000..7a72d9394340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+#include "fbmem.h"
+
+struct nv04_devinit_priv {
+	struct nouveau_devinit base;
+	int owner;
+};
+
+static void
+nv04_devinit_meminit(struct nouveau_devinit *devinit)
+{
+	struct nv04_devinit_priv *priv = (void *)devinit;
+	u32 patt = 0xdeadbeef;
+	struct io_mapping *fb;
+	int i;
+
+	/* Map the framebuffer aperture */
+	fb = fbmem_init(nv_device(priv)->pdev);
+	if (!fb) {
+		nv_error(priv, "failed to map fb\n");
+		return;
+	}
+
+	/* Sequencer and refresh off */
+	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
+	nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
+
+	nv_mask(priv, NV04_PFB_BOOT_0, ~0,
+		      NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
+		      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+		      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
+
+	for (i = 0; i < 4; i++)
+		fbmem_poke(fb, 4 * i, patt);
+
+	fbmem_poke(fb, 0x400000, patt + 1);
+
+	if (fbmem_peek(fb, 0) == patt + 1) {
+		nv_mask(priv, NV04_PFB_BOOT_0,
+			      NV04_PFB_BOOT_0_RAM_TYPE,
+			      NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
+		nv_mask(priv, NV04_PFB_DEBUG_0,
+			      NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+
+		for (i = 0; i < 4; i++)
+			fbmem_poke(fb, 4 * i, patt);
+
+		if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
+			nv_mask(priv, NV04_PFB_BOOT_0,
+				      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+				      NV04_PFB_BOOT_0_RAM_AMOUNT,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+	} else
+	if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
+		nv_mask(priv, NV04_PFB_BOOT_0,
+			      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+			      NV04_PFB_BOOT_0_RAM_AMOUNT,
+			      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+	} else
+	if (fbmem_peek(fb, 0) != patt) {
+		if (fbmem_readback(fb, 0x800000, patt))
+			nv_mask(priv, NV04_PFB_BOOT_0,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+		else
+			nv_mask(priv, NV04_PFB_BOOT_0,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+			      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
+	} else
+	if (!fbmem_readback(fb, 0x800000, patt)) {
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+			      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+	}
+
+	/* Refresh on, sequencer on */
+	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	fbmem_fini(fb);
+}
+
+static int
+nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv04_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.meminit = nv04_devinit_meminit;
+	priv->owner = -1;
+	return 0;
+}
+
+void
+nv04_devinit_dtor(struct nouveau_object *object)
+{
+	struct nv04_devinit_priv *priv = (void *)object;
+
+	/* restore vga owner saved at first init, and lock crtc regs */

+	nv_wrvgaowner(priv, priv->owner);
+	nv_lockvgac(priv, true);
+
+	nouveau_devinit_destroy(&priv->base);
+}
+
+int
+nv04_devinit_init(struct nouveau_object *object)
+{
+	struct nv04_devinit_priv *priv = (void *)object;
+
+	if (!priv->base.post) {
+		u32 htotal = nv_rdvgac(priv, 0, 0x06);
+		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
+		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
+		htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
+		htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
+		if (!htotal) {
+			nv_info(priv, "adaptor not initialised\n");
+			priv->base.post = true;
+		}
+	}
+
+	return nouveau_devinit_init(&priv->base);
+}
+
+int
+nv04_devinit_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_devinit_priv *priv = (void *)object;
+
+	/* make i2c busses accessible */
+	nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
+
+	/* unlock extended vga crtc regs, and unslave crtcs */
+	nv_lockvgac(priv, false);
+	if (priv->owner < 0)
+		priv->owner = nv_rdvgaowner(priv);
+	nv_wrvgaowner(priv, 0);
+
+	return nouveau_devinit_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv04_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
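
nv04_devinit_init() decides whether the board still needs to be POSTed by reassembling the CRTC horizontal total from its scattered VGA registers; a value of zero means the adaptor was never initialised. The bit mapping implied by the code is CR06 for bits 7:0, CR07[0] for bit 8, CR07[5] for bit 9, CR25[0] for bit 10 and CR41[0] for bit 11. A worked example with made-up register values:

#include <stdio.h>

int
main(void)
{
	unsigned int cr06 = 0x5f, cr07 = 0x21, cr25 = 0x00, cr41 = 0x00;
	unsigned int htotal = cr06;

	htotal |= (cr07 & 0x01) << 8;	/* bit 8  */
	htotal |= (cr07 & 0x20) << 4;	/* bit 9  */
	htotal |= (cr25 & 0x01) << 10;	/* bit 10 */
	htotal |= (cr41 & 0x01) << 11;	/* bit 11 */
	printf("htotal = 0x%03x\n", htotal);	/* 0x35f here; 0 would force a POST */
	return 0;
}
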
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
new file mode 100644
index 000000000000..191447d0d252
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/bios.h>
+#include <subdev/bios/bmp.h>
+#include <subdev/vga.h>
+
+#include "fbmem.h"
+
+struct nv05_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
+};
+
+static void
+nv05_devinit_meminit(struct nouveau_devinit *devinit)
+{
+	static const u8 default_config_tab[][2] = {
+		{ 0x24, 0x00 },
+		{ 0x28, 0x00 },
+		{ 0x24, 0x01 },
+		{ 0x1f, 0x00 },
+		{ 0x0f, 0x00 },
+		{ 0x17, 0x00 },
+		{ 0x06, 0x00 },
+		{ 0x00, 0x00 }
+	};
+	struct nv05_devinit_priv *priv = (void *)devinit;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct io_mapping *fb;
+	u32 patt = 0xdeadbeef;
+	u16 data;
+	u8 strap, ramcfg[2];
+	int i, v;
+
+	/* Map the framebuffer aperture */
+	fb = fbmem_init(nv_device(priv)->pdev);
+	if (!fb) {
+		nv_error(priv, "failed to map fb\n");
+		return;
+	}
+
+	strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2;
+	if ((data = bmp_mem_init_table(bios))) {
+		ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0);
+		ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1);
+	} else {
+		ramcfg[0] = default_config_tab[strap][0];
+		ramcfg[1] = default_config_tab[strap][1];
+	}
+
+	/* Sequencer off */
+	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
+
+	if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
+		goto out;
+
+	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+
+	/* If present load the hardcoded scrambling table */
+	if (data) {
+		for (i = 0, data += 0x10; i < 8; i++, data += 4) {
+			u32 scramble = nv_ro32(bios, data);
+			nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble);
+		}
+	}
+
+	/* Set memory type/width/length defaults depending on the straps */
+	nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
+
+	if (ramcfg[1] & 0x80)
+		nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
+
+	nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
+	nv_mask(priv, NV04_PFB_CFG1, 0, 1);
+
+	/* Probe memory bus width */
+	for (i = 0; i < 4; i++)
+		fbmem_poke(fb, 4 * i, patt);
+
+	if (fbmem_peek(fb, 0xc) != patt)
+		nv_mask(priv, NV04_PFB_BOOT_0,
+			  NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
+
+	/* Probe memory length */
+	v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
+
+	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
+	    (!fbmem_readback(fb, 0x1000000, ++patt) ||
+	     !fbmem_readback(fb, 0, ++patt)))
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+			  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
+
+	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
+	    !fbmem_readback(fb, 0x800000, ++patt))
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+	if (!fbmem_readback(fb, 0x400000, ++patt))
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+out:
+	/* Sequencer on */
+	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	fbmem_fini(fb);
+}
+
+static int
+nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv05_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.meminit = nv05_devinit_meminit;
+	return 0;
+}
+
+struct nouveau_oclass
+nv05_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x05),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv05_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
new file mode 100644
index 000000000000..eb76ffab6b0c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+#include "fbmem.h"
+
+struct nv10_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
+};
+
+static void
+nv10_devinit_meminit(struct nouveau_devinit *devinit)
+{
+	struct nv10_devinit_priv *priv = (void *)devinit;
+	const int mem_width[] = { 0x10, 0x00, 0x20 };
+	const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2;
+	uint32_t patt = 0xdeadbeef;
+	struct io_mapping *fb;
+	int i, j, k;
+
+	/* Map the framebuffer aperture */
+	fb = fbmem_init(nv_device(priv)->pdev);
+	if (!fb) {
+		nv_error(priv, "failed to map fb\n");
+		return;
+	}
+
+	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+
+	/* Probe memory bus width */
+	for (i = 0; i < mem_width_count; i++) {
+		nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]);
+
+		for (j = 0; j < 4; j++) {
+			for (k = 0; k < 4; k++)
+				fbmem_poke(fb, 0x1c, 0);
+
+			fbmem_poke(fb, 0x1c, patt);
+			fbmem_poke(fb, 0x3c, 0);
+
+			if (fbmem_peek(fb, 0x1c) == patt)
+				goto mem_width_found;
+		}
+	}
+
+mem_width_found:
+	patt <<= 1;
+
+	/* Probe amount of installed memory */
+	for (i = 0; i < 4; i++) {
+		int off = nv_rd32(priv, 0x10020c) - 0x100000;
+
+		fbmem_poke(fb, off, patt);
+		fbmem_poke(fb, 0, 0);
+
+		fbmem_peek(fb, 0);
+		fbmem_peek(fb, 0);
+		fbmem_peek(fb, 0);
+		fbmem_peek(fb, 0);
+
+		if (fbmem_peek(fb, off) == patt)
+			goto amount_found;
+	}
+
+	/* IC missing - disable the upper half memory space. */
+	nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0);
+
+amount_found:
+	fbmem_fini(fb);
+}
+
+static int
+nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv10_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.meminit = nv10_devinit_meminit;
+	return 0;
+}
+
+struct nouveau_oclass
+nv10_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index 326bf5e2035a..5b2ba630d913 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -18,42 +18,41 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
  */
 
-#ifndef __NOUVEAU_I2C_H__
-#define __NOUVEAU_I2C_H__
-
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <drm/drm_dp_helper.h>
-
-#define NV_I2C_PORT(n)    (0x00 + (n))
-#define NV_I2C_PORT_NUM    0x10
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
 
-struct nouveau_i2c_chan {
-	struct i2c_adapter adapter;
-	struct drm_device *dev;
-	struct i2c_algo_bit_data bit;
-	struct list_head head;
-	u8  index;
-	u8  type;
-	u32 dcb;
-	u32 drive;
-	u32 sense;
-	u32 state;
+struct nv1a_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
 };
 
-int  nouveau_i2c_init(struct drm_device *);
-void nouveau_i2c_fini(struct drm_device *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
-bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
-int nouveau_i2c_identify(struct drm_device *dev, const char *what,
-			 struct i2c_board_info *info,
-			 bool (*match)(struct nouveau_i2c_chan *,
-				       struct i2c_board_info *),
-			 int index);
+static int
+nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv1a_devinit_priv *priv;
+	int ret;
 
-extern const struct i2c_algorithm nouveau_dp_i2c_algo;
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-#endif /* __NOUVEAU_I2C_H__ */
+	return 0;
+}
+
+struct nouveau_oclass
+nv1a_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x1a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv1a_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
new file mode 100644
index 000000000000..eb32e99005e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+#include "fbmem.h"
+
+struct nv20_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
+};
+
+static void
+nv20_devinit_meminit(struct nouveau_devinit *devinit)
+{
+	struct nv20_devinit_priv *priv = (void *)devinit;
+	struct nouveau_device *device = nv_device(priv);
+	uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
+	uint32_t amount, off;
+	struct io_mapping *fb;
+
+	/* Map the framebuffer aperture */
+	fb = fbmem_init(nv_device(priv)->pdev);
+	if (!fb) {
+		nv_error(priv, "failed to map fb\n");
+		return;
+	}
+
+	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+
+	/* Allow full addressing */
+	nv_mask(priv, NV04_PFB_CFG0, 0, mask);
+
+	amount = nv_rd32(priv, 0x10020c);
+	for (off = amount; off > 0x2000000; off -= 0x2000000)
+		fbmem_poke(fb, off - 4, off);
+
+	amount = nv_rd32(priv, 0x10020c);
+	if (amount != fbmem_peek(fb, amount - 4))
+		/* IC missing - disable the upper half memory space. */
+		nv_mask(priv, NV04_PFB_CFG0, mask, 0);
+
+	fbmem_fini(fb);
+}
+
+static int
+nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv20_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.meminit = nv20_devinit_meminit;
+	return 0;
+}
+
+struct nouveau_oclass
+nv20_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x20),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv20_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
new file mode 100644
index 000000000000..61becfa732e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+struct nv50_devinit_priv {
+	struct nouveau_devinit base;
+};
+
+static int
+nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv50_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nv50_devinit_dtor(struct nouveau_object *object)
+{
+	struct nv50_devinit_priv *priv = (void *)object;
+	nouveau_devinit_destroy(&priv->base);
+}
+
+static int
+nv50_devinit_init(struct nouveau_object *object)
+{
+	struct nv50_devinit_priv *priv = (void *)object;
+
+	if (!priv->base.post) {
+		if (!nv_rdvgac(priv, 0, 0x00) &&
+		    !nv_rdvgac(priv, 0, 0x1a)) {
+			nv_info(priv, "adaptor not initialised\n");
+			priv->base.post = true;
+		}
+	}
+
+	return nouveau_devinit_init(&priv->base);
+}
+
+static int
+nv50_devinit_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_devinit_priv *priv = (void *)object;
+	return nouveau_devinit_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv50_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_devinit_ctor,
+		.dtor = nv50_devinit_dtor,
+		.init = nv50_devinit_init,
+		.fini = nv50_devinit_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
new file mode 100644
index 000000000000..f0086de8af31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "subdev/fb.h"
+#include "subdev/bios.h"
+#include "subdev/bios/bit.h"
+
+int
+nouveau_fb_bios_memtype(struct nouveau_bios *bios)
+{
+	struct bit_entry M;
+	u8 ramcfg;
+
+	ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+	if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) {
+		u16 table   = nv_ro16(bios, M.offset + 3);
+		u8  version = nv_ro08(bios, table + 0);
+		u8  header  = nv_ro08(bios, table + 1);
+		u8  record  = nv_ro08(bios, table + 2);
+		u8  entries = nv_ro08(bios, table + 3);
+		if (table && version == 0x10 && ramcfg < entries) {
+			u16 entry = table + header + (ramcfg * record);
+			switch (nv_ro08(bios, entry) & 0x0f) {
+			case 0: return NV_MEM_TYPE_DDR2;
+			case 1: return NV_MEM_TYPE_DDR3;
+			case 2: return NV_MEM_TYPE_GDDR3;
+			case 3: return NV_MEM_TYPE_GDDR5;
+			default:
+				break;
+			}
+
+		}
+	}
+
+	return NV_MEM_TYPE_UNKNOWN;
+}
+
+int
+nouveau_fb_init(struct nouveau_fb *pfb)
+{
+	int ret, i;
+
+	ret = nouveau_subdev_init(&pfb->base);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < pfb->tile.regions; i++)
+		pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+
+	return 0;
+}
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object;
+	return nouveau_fb_init(pfb);
+}
+
+void
+nouveau_fb_destroy(struct nouveau_fb *pfb)
+{
+	int i;
+
+	for (i = 0; i < pfb->tile.regions; i++)
+		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
+
+	if (pfb->tags.block_size)
+		nouveau_mm_fini(&pfb->tags);
+
+	if (pfb->vram.block_size)
+		nouveau_mm_fini(&pfb->vram);
+
+	nouveau_subdev_destroy(&pfb->base);
+}
+
+void
+_nouveau_fb_dtor(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object;
+	nouveau_fb_destroy(pfb);
+}
+
+int
+nouveau_fb_created(struct nouveau_fb *pfb)
+{
+	static const char *name[] = {
+		[NV_MEM_TYPE_UNKNOWN] = "unknown",
+		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+		[NV_MEM_TYPE_SGRAM  ] = "SGRAM",
+		[NV_MEM_TYPE_SDRAM  ] = "SDRAM",
+		[NV_MEM_TYPE_DDR1   ] = "DDR1",
+		[NV_MEM_TYPE_DDR2   ] = "DDR2",
+		[NV_MEM_TYPE_DDR3   ] = "DDR3",
+		[NV_MEM_TYPE_GDDR2  ] = "GDDR2",
+		[NV_MEM_TYPE_GDDR3  ] = "GDDR3",
+		[NV_MEM_TYPE_GDDR4  ] = "GDDR4",
+		[NV_MEM_TYPE_GDDR5  ] = "GDDR5",
+	};
+
+	if (pfb->ram.size == 0) {
+		nv_fatal(pfb, "no vram detected!!\n");
+		return -ERANGE;
+	}
+
+	nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+	return 0;
+}
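
nouveau_fb_bios_memtype() above looks the RAMCFG strap up in a table pointed to by BIT entry 'M'; the layout it assumes (version byte, header size, record size and entry count at offsets 0 to 3, then one record per strap whose low nibble is the memory type) is reconstructed here purely from the offsets the function reads. A userspace toy of the same lookup over invented table bytes:

#include <stdio.h>

static const unsigned char table[] = {
	0x10, 0x04, 0x01, 0x04,		/* version, header, record, entries */
	0x02, 0x01, 0x00, 0x03,		/* per-strap memory type codes      */
};

int
main(void)
{
	static const char *name[] = { "DDR2", "DDR3", "GDDR3", "GDDR5" };
	unsigned int header = table[1], record = table[2], entries = table[3];
	unsigned int ramcfg = 1;	/* hypothetical strap value */

	if (table[0] == 0x10 && ramcfg < entries) {
		unsigned int code = table[header + ramcfg * record] & 0x0f;
		if (code < 4)
			printf("RAMCFG %u -> %s\n", ramcfg, name[code]);
	}
	return 0;
}
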
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
new file mode 100644
index 000000000000..eb06836b69f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/fb.h>
+
+#define NV04_PFB_BOOT_0						0x00100000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB			0x00000000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB			0x00000001
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB			0x00000002
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_WIDTH_128			0x00000004
+#	define NV04_PFB_BOOT_0_RAM_TYPE				0x00000028
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT		0x00000000
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT		0x00000008
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK	0x00000010
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT		0x00000018
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT		0x00000020
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16		0x00000028
+#	define NV04_PFB_BOOT_0_UMA_ENABLE			0x00000100
+#	define NV04_PFB_BOOT_0_UMA_SIZE				0x0000f000
+#define NV04_PFB_CFG0						0x00100200
+
+struct nv04_fb_priv {
+	struct nouveau_fb base;
+};
+
+bool
+nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
+{
+	if (!(tile_flags & 0xff00))
+		return true;
+
+	return false;
+}
+
+static int
+nv04_fb_init(struct nouveau_object *object)
+{
+	struct nv04_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* This is what the DDX did for NV_ARCH_04, but an mmio-trace shows
+	 * nvidia reading PFB_CFG_0, then writing back its original value.
+	 * (which was 0x701114 in this case)
+	 */
+	nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
+	return 0;
+}
+
+static int
+nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv04_fb_priv *priv;
+	u32 boot0;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
+	if (boot0 & 0x00000100) {
+		priv->base.ram.size  = ((boot0 >> 12) & 0xf) * 2 + 2;
+		priv->base.ram.size *= 1024 * 1024;
+	} else {
+		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+			priv->base.ram.size = 32 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+			priv->base.ram.size = 16 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+			priv->base.ram.size = 8 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+			priv->base.ram.size = 4 * 1024 * 1024;
+			break;
+		}
+	}
+
+	if ((boot0 & 0x00000038) <= 0x10)
+		priv->base.ram.type = NV_MEM_TYPE_SGRAM;
+	else
+		priv->base.ram.type = NV_MEM_TYPE_SDRAM;
+
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	return nouveau_fb_created(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv04_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
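
For UMA boards (BOOT_0 bit 8 set) the constructor above decodes the amount of carved-out memory as (2n + 2) MiB, where n is the 4-bit field in BOOT_0 bits 15:12. A worked example with a hypothetical register value:

#include <stdio.h>

int
main(void)
{
	unsigned int boot0 = 0x00003100;	/* hypothetical: UMA enabled, size field = 3 */
	unsigned int mib   = ((boot0 >> 12) & 0xf) * 2 + 2;

	printf("%u MiB\n", mib);		/* prints "8 MiB" */
	return 0;
}
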
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
new file mode 100644
index 000000000000..f037a422d2f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv10_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0x80000000 | addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+static void
+nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0;
+	tile->limit = 0;
+	tile->pitch = 0;
+	tile->zcomp = 0;
+}
+
+void
+nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+}
+
+static int
+nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv10_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	if (device->chipset == 0x1a ||  device->chipset == 0x1f) {
+		struct pci_dev *bridge;
+		u32 mem, mib;
+
+		bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+		if (!bridge) {
+			nv_fatal(device, "no bridge device\n");
+			return 0;
+		}
+
+		if (device->chipset == 0x1a) {
+			pci_read_config_dword(bridge, 0x7c, &mem);
+			mib = ((mem >> 6) & 31) + 1;
+		} else {
+			pci_read_config_dword(bridge, 0x84, &mem);
+			mib = ((mem >> 4) & 127) + 1;
+		}
+
+		priv->base.ram.type = NV_MEM_TYPE_STOLEN;
+		priv->base.ram.size = mib * 1024 * 1024;
+	} else {
+		u32 cfg0 = nv_rd32(priv, 0x100200);
+		if (cfg0 & 0x00000001)
+			priv->base.ram.type = NV_MEM_TYPE_DDR1;
+		else
+			priv->base.ram.type = NV_MEM_TYPE_SDRAM;
+
+		priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
+	}
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv10_fb_tile_init;
+	priv->base.tile.fini = nv10_fb_tile_fini;
+	priv->base.tile.prog = nv10_fb_tile_prog;
+	return nouveau_fb_created(&priv->base);
+}
+
+struct nouveau_oclass
+nv10_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
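
On the IGP chipsets (0x1a and 0x1f) the framebuffer is stolen system memory, so its size comes from the host bridge's PCI configuration space rather than from PFB. For 0x1a the code above reads config dword 0x7c and treats bits 10:6 as (MiB - 1); a worked decode with a hypothetical value:

#include <stdio.h>

int
main(void)
{
	unsigned int mem = 0x000007c0;		/* hypothetical config dword at 0x7c */
	unsigned int mib = ((mem >> 6) & 31) + 1;

	printf("%u MiB stolen\n", mib);		/* prints "32 MiB stolen" */
	return 0;
}
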
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
new file mode 100644
index 000000000000..4b3578fcb7fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv20_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	int bpp = (flags & 2) ? 32 : 16;
+
+	tile->addr  = 0x00000001 | addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+
+	/* Allocate some of the on-die tag memory, used to store Z
+	 * compression meta-data (most likely just a bitmap determining
+	 * if a given tile is compressed or not).
+	 */
+	size /= 256;
+	if (flags & 4) {
+		if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
+			/* Enable Z compression */
+			tile->zcomp = tile->tag->offset;
+			if (device->chipset >= 0x25) {
+				if (bpp == 16)
+					tile->zcomp |= 0x00100000;
+				else
+					tile->zcomp |= 0x00200000;
+			} else {
+				tile->zcomp |= 0x80000000;
+				if (bpp != 16)
+					tile->zcomp |= 0x04000000;
+			}
+		}
+
+		tile->addr |= 2;
+	}
+}
+
+static void
+nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0;
+	tile->limit = 0;
+	tile->pitch = 0;
+	tile->zcomp = 0;
+	nouveau_mm_free(&pfb->tags, &tile->tag);
+}
+
+static void
+nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
+}
+
+static int
+nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv20_fb_priv *priv;
+	u32 pbus1218;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	pbus1218 = nv_rd32(priv, 0x001218);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
+	}
+	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
+
+	if (device->chipset >= 0x25)
+		ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
+	else
+		ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv20_fb_tile_init;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_created(&priv->base);
+}
+
+struct nouveau_oclass
+nv20_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x20),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv20_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
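
nv20_fb_tile_init() above reserves one tag per 256 bytes of the tiled region before enabling Z compression, out of a heap of 64K tags on nv25 and later (32K before that, per the constructor). A worked example of the sizing:

#include <stdio.h>

int
main(void)
{
	unsigned int region_bytes = 16 << 20;		/* hypothetical 16 MiB tile region */
	unsigned int tags = region_bytes / 256;

	printf("%u tags\n", tags);			/* prints "65536 tags" */
	return 0;
}
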
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
new file mode 100644
index 000000000000..cba67bc91390
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv30_fb_priv {
+	struct nouveau_fb base;
+};
+
+void
+nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	tile->addr = addr | 1;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0;
+	tile->limit = 0;
+	tile->pitch = 0;
+}
+
+static int
+calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
+{
+	struct nouveau_device *device = nv_device(priv);
+	int b = (device->chipset > 0x30 ?
+		 nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+		 0) & 0xf;
+
+	return 2 * (b & 0x8 ? b - 0x10 : b);
+}
+
+static int
+calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
+{
+	int j, x = 0;
+
+	for (j = 0; j < 4; j++) {
+		int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j);
+
+		x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
+	}
+
+	return x;
+}
+
+static int
+nv30_fb_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv30_fb_priv *priv = (void *)object;
+	int ret, i, j;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* Init the memory timing regs at 0x10037c/0x1003ac */
+	if (device->chipset == 0x30 ||
+	    device->chipset == 0x31 ||
+	    device->chipset == 0x35) {
+		/* Related to ROP count */
+		int n = (device->chipset == 0x31 ? 2 : 4);
+		int l = nv_rd32(priv, 0x1003d0);
+
+		for (i = 0; i < n; i++) {
+			for (j = 0; j < 3; j++)
+				nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
+					calc_ref(priv, l, 0, j));
+
+			for (j = 0; j < 2; j++)
+				nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
+					calc_ref(priv, l, 1, j));
+		}
+	}
+
+	return 0;
+}
+
+static int
+nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv30_fb_priv *priv;
+	u32 pbus1218;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	pbus1218 = nv_rd32(priv, 0x001218);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
+	}
+	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.fini = nv30_fb_tile_fini;
+	priv->base.tile.prog = nv10_fb_tile_prog;
+	return nouveau_fb_created(&priv->base);
+}
+
+struct nouveau_oclass
+nv30_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x30),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv30_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv30_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
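
calc_bias() above treats each 4-bit field of the 0x122c registers as a signed two's-complement value and doubles it, so 0x7 becomes +14 and 0xf becomes -2. A standalone check of that decode:

#include <stdio.h>

static int
bias(int b)
{
	b &= 0xf;
	return 2 * ((b & 0x8) ? b - 0x10 : b);
}

int
main(void)
{
	printf("%d %d %d\n", bias(0x0), bias(0x7), bias(0xf));	/* 0 14 -2 */
	return 0;
}
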
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
new file mode 100644
index 000000000000..347a496fcad8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv40_fb_priv {
+	struct nouveau_fb base;
+};
+
+static inline int
+nv44_graph_class(struct nouveau_device *device)
+{
+	if ((device->chipset & 0xf0) == 0x60)
+		return 1;
+
+	return !(0x0baf & (1 << (device->chipset & 0x0f)));
+}
+
+static void
+nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+}
+
+static void
+nv40_fb_init_gart(struct nv40_fb_priv *priv)
+{
+	nv_wr32(priv, 0x100800, 0x00000001);
+}
+
+static void
+nv44_fb_init_gart(struct nv40_fb_priv *priv)
+{
+	nv_wr32(priv, 0x100850, 0x80000000);
+	nv_wr32(priv, 0x100800, 0x00000001);
+}
+
+static int
+nv40_fb_init(struct nouveau_object *object)
+{
+	struct nv40_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	switch (nv_device(priv)->chipset) {
+	case 0x40:
+	case 0x45:
+		nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
+		break;
+	default:
+		if (nv44_graph_class(nv_device(priv)))
+			nv44_fb_init_gart(priv);
+		else
+			nv40_fb_init_gart(priv);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv40_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* 0x001218 is actually present on a few other NV4X chips I looked
+	 * at, and even contains sane values matching 0x100474.  From looking
+	 * at various vbios images, however, this isn't the case everywhere.
+	 * So, I chose to use the same regs I've seen NVIDIA reading around
+	 * the memory detection, hopefully that'll get us the right numbers.
+	 */
+	if (device->chipset == 0x40) {
+		u32 pbus1218 = nv_rd32(priv, 0x001218);
+		switch (pbus1218 & 0x00000300) {
+		case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
+		case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
+		case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
+		case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
+		}
+	} else
+	if (device->chipset == 0x49 || device->chipset == 0x4b) {
+		u32 pfb914 = nv_rd32(priv, 0x100914);
+		switch (pfb914 & 0x00000003) {
+		case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
+		case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
+		case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
+		case 0x00000003: break;
+		}
+	} else
+	if (device->chipset != 0x4e) {
+		u32 pfb474 = nv_rd32(priv, 0x100474);
+		if (pfb474 & 0x00000004)
+			priv->base.ram.type = NV_MEM_TYPE_GDDR3;
+		if (pfb474 & 0x00000002)
+			priv->base.ram.type = NV_MEM_TYPE_DDR2;
+		if (pfb474 & 0x00000001)
+			priv->base.ram.type = NV_MEM_TYPE_DDR1;
+	} else {
+		priv->base.ram.type = NV_MEM_TYPE_STOLEN;
+	}
+
+	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	switch (device->chipset) {
+	case 0x40:
+	case 0x45:
+		priv->base.tile.regions = 8;
+		break;
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+	case 0x4c:
+		priv->base.tile.regions = 15;
+		break;
+	default:
+		priv->base.tile.regions = 12;
+		break;
+	}
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.fini = nv30_fb_tile_fini;
+	if (device->chipset == 0x40)
+		priv->base.tile.prog = nv10_fb_tile_prog;
+	else
+		priv->base.tile.prog = nv40_fb_tile_prog;
+
+	return nouveau_fb_created(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv40_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv40_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
new file mode 100644
index 000000000000..436e9efe7ef5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/enum.h>
+
+#include <subdev/fb.h>
+#include <subdev/bios.h>
+
+struct nv50_fb_priv {
+	struct nouveau_fb base;
+	struct page *r100c08_page;
+	dma_addr_t r100c08;
+};
+
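+/* memtype classes: 0 = unsupported, non-zero selects the allocation type */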
+static int types[0x80] = {
+	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+static bool
+nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
+{
+	return types[(memtype & 0xff00) >> 8] != 0;
+}
+
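+/* carve VRAM from the heap in 4KiB units; compressed memtypes also try to
+ * reserve compression tags, falling back to uncompressed when that fails
+ */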
+static int
+nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
+		 u32 memtype, struct nouveau_mem **pmem)
+{
+	struct nv50_fb_priv *priv = (void *)pfb;
+	struct nouveau_mm *heap = &priv->base.vram;
+	struct nouveau_mm *tags = &priv->base.tags;
+	struct nouveau_mm_node *r;
+	struct nouveau_mem *mem;
+	int comp = (memtype & 0x300) >> 8;
+	int type = (memtype & 0x07f);
+	int back = (memtype & 0x800);
+	int min, max, ret;
+
+	max = (size >> 12);
+	min = ncmin ? (ncmin >> 12) : max;
+	align >>= 12;
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	mutex_lock(&pfb->base.mutex);
+	if (comp) {
+		if (align == 16) {
+			int n = (max >> 4) * comp;
+
+			ret = nouveau_mm_head(tags, 1, n, n, 1, &mem->tag);
+			if (ret)
+				mem->tag = NULL;
+		}
+
+		if (unlikely(!mem->tag))
+			comp = 0;
+	}
+
+	INIT_LIST_HEAD(&mem->regions);
+	mem->memtype = (comp << 7) | type;
+	mem->size = max;
+
+	type = types[type];
+	do {
+		if (back)
+			ret = nouveau_mm_tail(heap, type, max, min, align, &r);
+		else
+			ret = nouveau_mm_head(heap, type, max, min, align, &r);
+		if (ret) {
+			mutex_unlock(&pfb->base.mutex);
+			pfb->ram.put(pfb, &mem);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &mem->regions);
+		max -= r->length;
+	} while (max);
+	mutex_unlock(&pfb->base.mutex);
+
+	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+	mem->offset = (u64)r->offset << 12;
+	*pmem = mem;
+	return 0;
+}
+
+void
+nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+	struct nv50_fb_priv *priv = (void *)pfb;
+	struct nouveau_mm_node *this;
+	struct nouveau_mem *mem;
+
+	mem = *pmem;
+	*pmem = NULL;
+	if (unlikely(mem == NULL))
+		return;
+
+	mutex_lock(&pfb->base.mutex);
+	while (!list_empty(&mem->regions)) {
+		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
+
+		list_del(&this->rl_entry);
+		nouveau_mm_free(&priv->base.vram, &this);
+	}
+
+	nouveau_mm_free(&priv->base.tags, &mem->tag);
+	mutex_unlock(&pfb->base.mutex);
+
+	kfree(mem);
+}
+
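+/* derive the block size used by the VRAM allocator from the memory
+ * controller's partition/bank/column configuration
+ */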
+static u32
+nv50_vram_rblock(struct nv50_fb_priv *priv)
+{
+	int i, parts, colbits, rowbitsa, rowbitsb, banks;
+	u64 rowsize, predicted;
+	u32 r0, r4, rt, ru, rblock_size;
+
+	r0 = nv_rd32(priv, 0x100200);
+	r4 = nv_rd32(priv, 0x100204);
+	rt = nv_rd32(priv, 0x100250);
+	ru = nv_rd32(priv, 0x001540);
+	nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+	for (i = 0, parts = 0; i < 8; i++) {
+		if (ru & (0x00010000 << i))
+			parts++;
+	}
+
+	colbits  =  (r4 & 0x0000f000) >> 12;
+	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+	rowsize = parts * banks * (1 << colbits) * 8;
+	predicted = rowsize << rowbitsa;
+	if (r0 & 0x00000004)
+		predicted += rowsize << rowbitsb;
+
+	if (predicted != priv->base.ram.size) {
+		nv_warn(priv, "memory controller reports %d MiB VRAM\n",
+			(u32)(priv->base.ram.size >> 20));
+	}
+
+	rblock_size = rowsize;
+	if (rt & 1)
+		rblock_size *= 3;
+
+	nv_debug(priv, "rblock %d bytes\n", rblock_size);
+	return rblock_size;
+}
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	struct nv50_fb_priv *priv;
+	u32 tags;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	switch (nv_rd32(priv, 0x100714) & 0x00000007) {
+	case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
+	case 1:
+		if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+			priv->base.ram.type = NV_MEM_TYPE_DDR3;
+		else
+			priv->base.ram.type = NV_MEM_TYPE_DDR2;
+		break;
+	case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
+	case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
+	default:
+		break;
+	}
+
+	priv->base.ram.size = nv_rd32(priv, 0x10020c);
+	priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
+			     ((priv->base.ram.size & 0x000000ff) << 32);
+
+	tags = nv_rd32(priv, 0x100320);
+	if (tags) {
+		ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
+		if (ret)
+			return ret;
+
+		nv_debug(priv, "%d compression tags\n", tags);
+	}
+
+	size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
+	switch (device->chipset) {
+	case 0xaa:
+	case 0xac:
+	case 0xaf: /* IGPs, no reordering, no real VRAM */
+		ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
+		if (ret)
+			return ret;
+
+		priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
+		break;
+	default:
+		ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
+				      nv50_vram_rblock(priv) >> 12);
+		if (ret)
+			return ret;
+
+		priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
+		break;
+	}
+
+	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (priv->r100c08_page) {
+		priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+					     0, PAGE_SIZE,
+					     PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+			nv_warn(priv, "failed 0x100c08 page map\n");
+	} else {
+		nv_warn(priv, "failed 0x100c08 page alloc\n");
+	}
+
+	priv->base.memtype_valid = nv50_fb_memtype_valid;
+	priv->base.ram.get = nv50_fb_vram_new;
+	priv->base.ram.put = nv50_fb_vram_del;
+	return nouveau_fb_created(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_priv *priv = (void *)object;
+
+	if (priv->r100c08_page) {
+		pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+			       PCI_DMA_BIDIRECTIONAL);
+		__free_page(priv->r100c08_page);
+	}
+
+	nouveau_mm_fini(&priv->base.vram);
+	nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* Not a clue what this is exactly.  Without pointing it at a
+	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+	 * cause IOMMU "read from address 0" errors (rh#561267).
+	 */
+	nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+	/* This is needed to get meaningful information from 100c90
+	 * on traps. No idea what these values mean exactly. */
+	switch (device->chipset) {
+	case 0x50:
+		nv_wr32(priv, 0x100c90, 0x000707ff);
+		break;
+	case 0xa3:
+	case 0xa5:
+	case 0xa8:
+		nv_wr32(priv, 0x100c90, 0x000d0fff);
+		break;
+	case 0xaf:
+		nv_wr32(priv, 0x100c90, 0x089d1fff);
+		break;
+	default:
+		nv_wr32(priv, 0x100c90, 0x001d07ff);
+		break;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
+
+static const struct nouveau_enum vm_dispatch_subclients[] = {
+	{ 0x00000000, "GRCTX", NULL },
+	{ 0x00000001, "NOTIFY", NULL },
+	{ 0x00000002, "QUERY", NULL },
+	{ 0x00000003, "COND", NULL },
+	{ 0x00000004, "M2M_IN", NULL },
+	{ 0x00000005, "M2M_OUT", NULL },
+	{ 0x00000006, "M2M_NOTIFY", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_ccache_subclients[] = {
+	{ 0x00000000, "CB", NULL },
+	{ 0x00000001, "TIC", NULL },
+	{ 0x00000002, "TSC", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_prop_subclients[] = {
+	{ 0x00000000, "RT0", NULL },
+	{ 0x00000001, "RT1", NULL },
+	{ 0x00000002, "RT2", NULL },
+	{ 0x00000003, "RT3", NULL },
+	{ 0x00000004, "RT4", NULL },
+	{ 0x00000005, "RT5", NULL },
+	{ 0x00000006, "RT6", NULL },
+	{ 0x00000007, "RT7", NULL },
+	{ 0x00000008, "ZETA", NULL },
+	{ 0x00000009, "LOCAL", NULL },
+	{ 0x0000000a, "GLOBAL", NULL },
+	{ 0x0000000b, "STACK", NULL },
+	{ 0x0000000c, "DST2D", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_pfifo_subclients[] = {
+	{ 0x00000000, "PUSHBUF", NULL },
+	{ 0x00000001, "SEMAPHORE", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_bar_subclients[] = {
+	{ 0x00000000, "FB", NULL },
+	{ 0x00000001, "IN", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_client[] = {
+	{ 0x00000000, "STRMOUT", NULL },
+	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
+	{ 0x00000004, "PFIFO_WRITE", NULL },
+	{ 0x00000005, "CCACHE", vm_ccache_subclients },
+	{ 0x00000006, "PPPP", NULL },
+	{ 0x00000007, "CLIPID", NULL },
+	{ 0x00000008, "PFIFO_READ", NULL },
+	{ 0x00000009, "VFETCH", NULL },
+	{ 0x0000000a, "TEXTURE", NULL },
+	{ 0x0000000b, "PROP", vm_prop_subclients },
+	{ 0x0000000c, "PVP", NULL },
+	{ 0x0000000d, "PBSP", NULL },
+	{ 0x0000000e, "PCRYPT", NULL },
+	{ 0x0000000f, "PCOUNTER", NULL },
+	{ 0x00000011, "PDAEMON", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_engine[] = {
+	{ 0x00000000, "PGRAPH", NULL },
+	{ 0x00000001, "PVP", NULL },
+	{ 0x00000004, "PEEPHOLE", NULL },
+	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
+	{ 0x00000006, "BAR", vm_bar_subclients },
+	{ 0x00000008, "PPPP", NULL },
+	{ 0x00000009, "PBSP", NULL },
+	{ 0x0000000a, "PCRYPT", NULL },
+	{ 0x0000000b, "PCOUNTER", NULL },
+	{ 0x0000000c, "SEMAPHORE_BG", NULL },
+	{ 0x0000000d, "PCOPY", NULL },
+	{ 0x0000000e, "PDAEMON", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_fault[] = {
+	{ 0x00000000, "PT_NOT_PRESENT", NULL },
+	{ 0x00000001, "PT_TOO_SHORT", NULL },
+	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
+	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
+	{ 0x00000004, "PAGE_READ_ONLY", NULL },
+	{ 0x00000006, "NULL_DMAOBJ", NULL },
+	{ 0x00000007, "WRONG_MEMTYPE", NULL },
+	{ 0x0000000b, "VRAM_LIMIT", NULL },
+	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
+	{}
+};
+
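+/* read back and, if requested, decode the VM trap latched in 0x100c90 */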
+void
+nv50_fb_trap(struct nouveau_fb *pfb, int display)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nv50_fb_priv *priv = (void *)pfb;
+	const struct nouveau_enum *en, *cl;
+	u32 trap[6], idx, chan;
+	u8 st0, st1, st2, st3;
+	int i;
+
+	idx = nv_rd32(priv, 0x100c90);
+	if (!(idx & 0x80000000))
+		return;
+	idx &= 0x00ffffff;
+
+	for (i = 0; i < 6; i++) {
+		nv_wr32(priv, 0x100c90, idx | i << 24);
+		trap[i] = nv_rd32(priv, 0x100c94);
+	}
+	nv_wr32(priv, 0x100c90, idx | 0x80000000);
+
+	if (!display)
+		return;
+
+	/* decode status bits into something more useful */
+	if (device->chipset  < 0xa3 ||
+	    device->chipset == 0xaa || device->chipset == 0xac) {
+		st0 = (trap[0] & 0x0000000f) >> 0;
+		st1 = (trap[0] & 0x000000f0) >> 4;
+		st2 = (trap[0] & 0x00000f00) >> 8;
+		st3 = (trap[0] & 0x0000f000) >> 12;
+	} else {
+		st0 = (trap[0] & 0x000000ff) >> 0;
+		st1 = (trap[0] & 0x0000ff00) >> 8;
+		st2 = (trap[0] & 0x00ff0000) >> 16;
+		st3 = (trap[0] & 0xff000000) >> 24;
+	}
+	chan = (trap[2] << 16) | trap[1];
+
+	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x ",
+		 (trap[5] & 0x00000100) ? "read" : "write",
+		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan);
+
+	en = nouveau_enum_find(vm_engine, st0);
+	if (en)
+		printk("%s/", en->name);
+	else
+		printk("%02x/", st0);
+
+	cl = nouveau_enum_find(vm_client, st2);
+	if (cl)
+		printk("%s/", cl->name);
+	else
+		printk("%02x/", st2);
+
+	if      (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
+	else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
+	else                     cl = NULL;
+	if (cl)
+		printk("%s", cl->name);
+	else
+		printk("%02x", st3);
+
+	printk(" reason: ");
+	en = nouveau_enum_find(vm_fault, st1);
+	if (en)
+		printk("%s\n", en->name);
+	else
+		printk("0x%08x\n", st1);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
new file mode 100644
index 000000000000..9f59f2bf0079
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/fb.h>
+#include <subdev/bios.h>
+
+struct nvc0_fb_priv {
+	struct nouveau_fb base;
+	struct page *r100c10_page;
+	dma_addr_t r100c10;
+};
+
+/* 0 = unsupported
+ * 1 = non-compressed
+ * 3 = compressed
+ */
+static const u8 types[256] = {
+	1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+	0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
+	3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+	3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+	3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
+	3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
+};
+
+static bool
+nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
+{
+	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
+	return likely((types[memtype] == 1));
+}
+
+static int
+nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
+		 u32 memtype, struct nouveau_mem **pmem)
+{
+	struct nouveau_mm *mm = &pfb->vram;
+	struct nouveau_mm_node *r;
+	struct nouveau_mem *mem;
+	int type = (memtype & 0x0ff);
+	int back = (memtype & 0x800);
+	int ret;
+
+	size  >>= 12;
+	align >>= 12;
+	ncmin >>= 12;
+	if (!ncmin)
+		ncmin = size;
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mem->regions);
+	mem->memtype = type;
+	mem->size = size;
+
+	mutex_lock(&mm->mutex);
+	do {
+		if (back)
+			ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
+		else
+			ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
+		if (ret) {
+			mutex_unlock(&mm->mutex);
+			pfb->ram.put(pfb, &mem);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &mem->regions);
+		size -= r->length;
+	} while (size);
+	mutex_unlock(&mm->mutex);
+
+	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+	mem->offset = (u64)r->offset << 12;
+	*pmem = mem;
+	return 0;
+}
+
+static int
+nvc0_fb_init(struct nouveau_object *object)
+{
+	struct nvc0_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+	return 0;
+}
+
+static void
+nvc0_fb_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nvc0_fb_priv *priv = (void *)object;
+
+	if (priv->r100c10_page) {
+		pci_unmap_page(device->pdev, priv->r100c10, PAGE_SIZE,
+			       PCI_DMA_BIDIRECTIONAL);
+		__free_page(priv->r100c10_page);
+	}
+
+	nouveau_fb_destroy(&priv->base);
+}
+
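+/* sum the VRAM attached to each enabled partition; when the amounts differ,
+ * expose the common portion from 0 and the remainder above the 8GiB mark
+ */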
+static int
+nvc0_vram_detect(struct nvc0_fb_priv *priv)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nouveau_fb *pfb = &priv->base;
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	u32 parts = nv_rd32(priv, 0x022438);
+	u32 pmask = nv_rd32(priv, 0x022554);
+	u32 bsize = nv_rd32(priv, 0x10f20c);
+	u32 offset, length;
+	bool uniform = true;
+	int ret, part;
+
+	nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
+	nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+	priv->base.ram.type = nouveau_fb_bios_memtype(bios);
+	priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
+
+	/* read amount of vram attached to each memory controller */
+	for (part = 0; part < parts; part++) {
+		if (!(pmask & (1 << part))) {
+			u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
+			if (psize != bsize) {
+				if (psize < bsize)
+					bsize = psize;
+				uniform = false;
+			}
+
+			nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
+			priv->base.ram.size += (u64)psize << 20;
+		}
+	}
+
+	/* if all controllers have the same amount attached, there are no holes */
+	if (uniform) {
+		offset = rsvd_head;
+		length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
+		return nouveau_mm_init(&pfb->vram, offset, length, 1);
+	}
+
+	/* otherwise, address lowest common amount from 0GiB */
+	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+	if (ret)
+		return ret;
+
+	/* and the rest starting from (8GiB + common_size) */
+	offset = (0x0200000000ULL >> 12) + (bsize << 8);
+	length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+	ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+	if (ret) {
+		nouveau_mm_fini(&pfb->vram);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nvc0_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nvc0_fb_memtype_valid;
+	priv->base.ram.get = nvc0_fb_vram_new;
+	priv->base.ram.put = nv50_fb_vram_del;
+
+	ret = nvc0_vram_detect(priv);
+	if (ret)
+		return ret;
+
+	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!priv->r100c10_page)
+		return -ENOMEM;
+
+	priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 0,
+				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(device->pdev, priv->r100c10))
+		return -EFAULT;
+
+	return nouveau_fb_created(&priv->base);
+}
+
+
+struct nouveau_oclass
+nvc0_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_fb_ctor,
+		.dtor = nvc0_fb_dtor,
+		.init = nvc0_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
new file mode 100644
index 000000000000..acf818c58bf0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
+static int
+nouveau_gpio_drive(struct nouveau_gpio *gpio,
+		   int idx, int line, int dir, int out)
+{
+	return gpio->drive ? gpio->drive(gpio, line, dir, out) : -ENODEV;
+}
+
+static int
+nouveau_gpio_sense(struct nouveau_gpio *gpio, int idx, int line)
+{
+	return gpio->sense ? gpio->sense(gpio, line) : -ENODEV;
+}
+
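+/* look up a GPIO function by tag and/or line in the DCB GPIO table, with a
+ * hardcoded fallback for boards whose table is missing the entry
+ */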
+static int
+nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
+		  struct dcb_gpio_func *func)
+{
+	if (line == 0xff && tag == 0xff)
+		return -EINVAL;
+
+	if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
+		return 0;
+
+	/* Apple iMac G4 NV18 */
+	if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) {
+		if (tag == DCB_GPIO_TVDAC0) {
+			*func = (struct dcb_gpio_func) {
+				.func = DCB_GPIO_TVDAC0,
+				.line = 4,
+				.log[0] = 0,
+				.log[1] = 1,
+			};
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int
+nouveau_gpio_set(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, int state)
+{
+	struct dcb_gpio_func func;
+	int ret;
+
+	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
+	if (ret == 0) {
+		int dir = !!(func.log[state] & 0x02);
+		int out = !!(func.log[state] & 0x01);
+		ret = nouveau_gpio_drive(gpio, idx, func.line, dir, out);
+	}
+
+	return ret;
+}
+
+static int
+nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
+{
+	struct dcb_gpio_func func;
+	int ret;
+
+	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
+	if (ret == 0) {
+		ret = nouveau_gpio_sense(gpio, idx, func.line);
+		if (ret >= 0)
+			ret = (ret == (func.log[1] & 1));
+	}
+
+	return ret;
+}
+
+static int
+nouveau_gpio_irq(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, bool on)
+{
+	struct dcb_gpio_func func;
+	int ret;
+
+	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
+	if (ret == 0) {
+		if (idx == 0 && gpio->irq_enable)
+			gpio->irq_enable(gpio, func.line, on);
+		else
+			ret = -ENODEV;
+	}
+
+	return ret;
+}
+
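+/* per-handler interrupt state; the handler runs from a workqueue, and
+ * 'inhibit' prevents re-queueing until the bottom half has completed
+ */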
+struct gpio_isr {
+	struct nouveau_gpio *gpio;
+	struct list_head head;
+	struct work_struct work;
+	int idx;
+	struct dcb_gpio_func func;
+	void (*handler)(void *, int);
+	void *data;
+	bool inhibit;
+};
+
+static void
+nouveau_gpio_isr_bh(struct work_struct *work)
+{
+	struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
+	struct nouveau_gpio *gpio = isr->gpio;
+	unsigned long flags;
+	int state;
+
+	state = nouveau_gpio_get(gpio, isr->idx, isr->func.func,
+						 isr->func.line);
+	if (state >= 0)
+		isr->handler(isr->data, state);
+
+	spin_lock_irqsave(&gpio->lock, flags);
+	isr->inhibit = false;
+	spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void
+nouveau_gpio_isr_run(struct nouveau_gpio *gpio, int idx, u32 line_mask)
+{
+	struct gpio_isr *isr;
+
+	if (idx != 0)
+		return;
+
+	spin_lock(&gpio->lock);
+	list_for_each_entry(isr, &gpio->isr, head) {
+		if (line_mask & (1 << isr->func.line)) {
+			if (isr->inhibit)
+				continue;
+			isr->inhibit = true;
+			schedule_work(&isr->work);
+		}
+	}
+	spin_unlock(&gpio->lock);
+}
+
+static int
+nouveau_gpio_isr_add(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
+		     void (*handler)(void *, int), void *data)
+{
+	struct gpio_isr *isr;
+	unsigned long flags;
+	int ret;
+
+	isr = kzalloc(sizeof(*isr), GFP_KERNEL);
+	if (!isr)
+		return -ENOMEM;
+
+	ret = nouveau_gpio_find(gpio, idx, tag, line, &isr->func);
+	if (ret) {
+		kfree(isr);
+		return ret;
+	}
+
+	INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
+	isr->gpio = gpio;
+	isr->handler = handler;
+	isr->data = data;
+	isr->idx = idx;
+
+	spin_lock_irqsave(&gpio->lock, flags);
+	list_add(&isr->head, &gpio->isr);
+	spin_unlock_irqrestore(&gpio->lock, flags);
+	return 0;
+}
+
+static void
+nouveau_gpio_isr_del(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
+		     void (*handler)(void *, int), void *data)
+{
+	struct gpio_isr *isr, *tmp;
+	struct dcb_gpio_func func;
+	unsigned long flags;
+	LIST_HEAD(tofree);
+	int ret;
+
+	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
+	if (ret == 0) {
+		spin_lock_irqsave(&gpio->lock, flags);
+		list_for_each_entry_safe(isr, tmp, &gpio->isr, head) {
+			if (memcmp(&isr->func, &func, sizeof(func)) ||
+			    isr->idx != idx ||
+			    isr->handler != handler || isr->data != data)
+				continue;
+			list_move_tail(&isr->head, &tofree);
+		}
+		spin_unlock_irqrestore(&gpio->lock, flags);
+
+		list_for_each_entry_safe(isr, tmp, &tofree, head) {
+			flush_work(&isr->work);
+			kfree(isr);
+		}
+	}
+}
+
+int
+nouveau_gpio_create_(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_gpio *gpio;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "GPIO", "gpio",
+				     length, pobject);
+	gpio = *pobject;
+	if (ret)
+		return ret;
+
+	gpio->find = nouveau_gpio_find;
+	gpio->set  = nouveau_gpio_set;
+	gpio->get  = nouveau_gpio_get;
+	gpio->irq  = nouveau_gpio_irq;
+	gpio->isr_run = nouveau_gpio_isr_run;
+	gpio->isr_add = nouveau_gpio_isr_add;
+	gpio->isr_del = nouveau_gpio_isr_del;
+	INIT_LIST_HEAD(&gpio->isr);
+	spin_lock_init(&gpio->lock);
+	return 0;
+}
+
+static struct dmi_system_id gpio_reset_ids[] = {
+	{
+		.ident = "Apple Macbook 10,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
+		}
+	},
+	{ }
+};
+
+int
+nouveau_gpio_init(struct nouveau_gpio *gpio)
+{
+	int ret = nouveau_subdev_init(&gpio->base);
+	if (ret == 0 && gpio->reset) {
+		if (dmi_check_system(gpio_reset_ids))
+			gpio->reset(gpio);
+	}
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
new file mode 100644
index 000000000000..168d16a9a8e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/gpio.h>
+
+struct nv10_gpio_priv {
+	struct nouveau_gpio base;
+};
+
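+/* lines 0-1 live in 0x600818, 2-9 in 0x60081c, 10-13 in 0x600850 */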
+static int
+nv10_gpio_sense(struct nouveau_gpio *gpio, int line)
+{
+	if (line < 2) {
+		line = line * 16;
+		line = nv_rd32(gpio, 0x600818) >> line;
+		return !!(line & 0x0100);
+	} else
+	if (line < 10) {
+		line = (line - 2) * 4;
+		line = nv_rd32(gpio, 0x60081c) >> line;
+		return !!(line & 0x04);
+	} else
+	if (line < 14) {
+		line = (line - 10) * 4;
+		line = nv_rd32(gpio, 0x600850) >> line;
+		return !!(line & 0x04);
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+{
+	u32 reg, mask, data;
+
+	if (line < 2) {
+		line = line * 16;
+		reg  = 0x600818;
+		mask = 0x00000011;
+		data = (dir << 4) | out;
+	} else
+	if (line < 10) {
+		line = (line - 2) * 4;
+		reg  = 0x60081c;
+		mask = 0x00000003;
+		data = (dir << 1) | out;
+	} else
+	if (line < 14) {
+		line = (line - 10) * 4;
+		reg  = 0x600850;
+		mask = 0x00000003;
+		data = (dir << 1) | out;
+	} else {
+		return -EINVAL;
+	}
+
+	nv_mask(gpio, reg, mask << line, data << line);
+	return 0;
+}
+
+static void
+nv10_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
+{
+	u32 mask = 0x00010001 << line;
+
+	nv_wr32(gpio, 0x001104, mask);
+	nv_mask(gpio, 0x001144, mask, on ? mask : 0);
+}
+
+static void
+nv10_gpio_intr(struct nouveau_subdev *subdev)
+{
+	struct nv10_gpio_priv *priv = (void *)subdev;
+	u32 intr = nv_rd32(priv, 0x001104);
+	u32 hi = (intr & 0x0000ffff) >> 0;
+	u32 lo = (intr & 0xffff0000) >> 16;
+
+	priv->base.isr_run(&priv->base, 0, hi | lo);
+
+	nv_wr32(priv, 0x001104, intr);
+}
+
+static int
+nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv10_gpio_priv *priv;
+	int ret;
+
+	ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.drive = nv10_gpio_drive;
+	priv->base.sense = nv10_gpio_sense;
+	priv->base.irq_enable = nv10_gpio_irq_enable;
+	nv_subdev(priv)->intr = nv10_gpio_intr;
+	return 0;
+}
+
+static void
+nv10_gpio_dtor(struct nouveau_object *object)
+{
+	struct nv10_gpio_priv *priv = (void *)object;
+	nouveau_gpio_destroy(&priv->base);
+}
+
+static int
+nv10_gpio_init(struct nouveau_object *object)
+{
+	struct nv10_gpio_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_gpio_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x001140, 0x00000000);
+	nv_wr32(priv, 0x001100, 0xffffffff);
+	nv_wr32(priv, 0x001144, 0x00000000);
+	nv_wr32(priv, 0x001104, 0xffffffff);
+	return 0;
+}
+
+static int
+nv10_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv10_gpio_priv *priv = (void *)object;
+	nv_wr32(priv, 0x001140, 0x00000000);
+	nv_wr32(priv, 0x001144, 0x00000000);
+	return nouveau_gpio_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv10_gpio_oclass = {
+	.handle = NV_SUBDEV(GPIO, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_gpio_ctor,
+		.dtor = nv10_gpio_dtor,
+		.init = nv10_gpio_init,
+		.fini = nv10_gpio_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
new file mode 100644
index 000000000000..f3502c961cd9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+struct nv50_gpio_priv {
+	struct nouveau_gpio base;
+};
+
+static void
+nv50_gpio_reset(struct nouveau_gpio *gpio)
+{
+	struct nouveau_bios *bios = nouveau_bios(gpio);
+	struct nv50_gpio_priv *priv = (void *)gpio;
+	u16 entry;
+	u8 ver;
+	int ent = -1;
+
+	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+		static const u32 regs[] = { 0xe100, 0xe28c };
+		u32 data = nv_ro32(bios, entry);
+		u8  line =   (data & 0x0000001f);
+		u8  func =   (data & 0x0000ff00) >> 8;
+		u8  defs = !!(data & 0x01000000);
+		u8  unk0 = !!(data & 0x02000000);
+		u8  unk1 = !!(data & 0x04000000);
+		u32 val = (unk1 << 16) | unk0;
+		u32 reg = regs[line >> 4]; line &= 0x0f;
+
+		if (func == 0xff)
+			continue;
+
+		gpio->set(gpio, 0, func, line, defs);
+
+		nv_mask(priv, reg, 0x00010001 << line, val << line);
+	}
+}
+
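+/* each of the four registers holds eight GPIO lines, four bits per line */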
+static int
+nv50_gpio_location(int line, u32 *reg, u32 *shift)
+{
+	const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+
+	if (line >= 32)
+		return -EINVAL;
+
+	*reg = nv50_gpio_reg[line >> 3];
+	*shift = (line & 7) << 2;
+	return 0;
+}
+
+static int
+nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+{
+	u32 reg, shift;
+
+	if (nv50_gpio_location(line, &reg, &shift))
+		return -EINVAL;
+
+	nv_mask(gpio, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+	return 0;
+}
+
+static int
+nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
+{
+	u32 reg, shift;
+
+	if (nv50_gpio_location(line, &reg, &shift))
+		return -EINVAL;
+
+	return !!(nv_rd32(gpio, reg) & (4 << shift));
+}
+
+void
+nv50_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
+{
+	u32 reg  = line < 16 ? 0xe050 : 0xe070;
+	u32 mask = 0x00010001 << (line & 0xf);
+
+	nv_wr32(gpio, reg + 4, mask);
+	nv_mask(gpio, reg + 0, mask, on ? mask : 0);
+}
+
+void
+nv50_gpio_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_gpio_priv *priv = (void *)subdev;
+	u32 intr0, intr1 = 0;
+	u32 hi, lo;
+
+	intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
+	if (nv_device(priv)->chipset >= 0x90)
+		intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
+
+	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+	priv->base.isr_run(&priv->base, 0, hi | lo);
+
+	nv_wr32(priv, 0xe054, intr0);
+	if (nv_device(priv)->chipset >= 0x90)
+		nv_wr32(priv, 0xe074, intr1);
+}
+
+static int
+nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_gpio_priv *priv;
+	int ret;
+
+	ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.reset = nv50_gpio_reset;
+	priv->base.drive = nv50_gpio_drive;
+	priv->base.sense = nv50_gpio_sense;
+	priv->base.irq_enable = nv50_gpio_irq_enable;
+	nv_subdev(priv)->intr = nv50_gpio_intr;
+	return 0;
+}
+
+void
+nv50_gpio_dtor(struct nouveau_object *object)
+{
+	struct nv50_gpio_priv *priv = (void *)object;
+	nouveau_gpio_destroy(&priv->base);
+}
+
+int
+nv50_gpio_init(struct nouveau_object *object)
+{
+	struct nv50_gpio_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_gpio_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* disable, and ack any pending gpio interrupts */
+	nv_wr32(priv, 0xe050, 0x00000000);
+	nv_wr32(priv, 0xe054, 0xffffffff);
+	if (nv_device(priv)->chipset >= 0x90) {
+		nv_wr32(priv, 0xe070, 0x00000000);
+		nv_wr32(priv, 0xe074, 0xffffffff);
+	}
+
+	return 0;
+}
+
+int
+nv50_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_gpio_priv *priv = (void *)object;
+	nv_wr32(priv, 0xe050, 0x00000000);
+	if (nv_device(priv)->chipset >= 0x90)
+		nv_wr32(priv, 0xe070, 0x00000000);
+	return nouveau_gpio_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv50_gpio_oclass = {
+	.handle = NV_SUBDEV(GPIO, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_gpio_ctor,
+		.dtor = nv50_gpio_dtor,
+		.init = nv50_gpio_init,
+		.fini = nv50_gpio_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
new file mode 100644
index 000000000000..8d18fcad26e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+struct nvd0_gpio_priv {
+	struct nouveau_gpio base;
+};
+
+static void
+nvd0_gpio_reset(struct nouveau_gpio *gpio)
+{
+	struct nouveau_bios *bios = nouveau_bios(gpio);
+	struct nvd0_gpio_priv *priv = (void *)gpio;
+	u16 entry;
+	u8 ver;
+	int ent = -1;
+
+	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+		u32 data = nv_ro32(bios, entry);
+		u8  line =   (data & 0x0000003f);
+		u8  defs = !!(data & 0x00000080);
+		u8  func =   (data & 0x0000ff00) >> 8;
+		u8  unk0 =   (data & 0x00ff0000) >> 16;
+		u8  unk1 =   (data & 0x1f000000) >> 24;
+
+		if (func == 0xff)
+			continue;
+
+		gpio->set(gpio, 0, func, line, defs);
+
+		nv_mask(priv, 0x00d610 + (line * 4), 0xff, unk0);
+		if (unk1--)
+			nv_mask(priv, 0x00d740 + (unk1 * 4), 0xff, line);
+	}
+}
+
+static int
+nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+{
+	u32 data = ((dir ^ 1) << 13) | (out << 12);
+	nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
+	nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */
+	return 0;
+}
+
+static int
+nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
+{
+	return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
+}
+
+static int
+nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nvd0_gpio_priv *priv;
+	int ret;
+
+	ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.reset = nvd0_gpio_reset;
+	priv->base.drive = nvd0_gpio_drive;
+	priv->base.sense = nvd0_gpio_sense;
+	priv->base.irq_enable = nv50_gpio_irq_enable;
+	nv_subdev(priv)->intr = nv50_gpio_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_gpio_oclass = {
+	.handle = NV_SUBDEV(GPIO, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_gpio_ctor,
+		.dtor = nv50_gpio_dtor,
+		.init = nv50_gpio_init,
+		.fini = nv50_gpio_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
new file mode 100644
index 000000000000..fe1ebf199ba9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+
+/******************************************************************************
+ * aux channel util functions
+ *****************************************************************************/
+#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
+#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct nouveau_i2c *aux, int ch)
+{
+	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+auxch_init(struct nouveau_i2c *aux, int ch)
+{
+	const u32 unksel = 1; /* nfi which to use, or if it matters... */
+	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+	const u32 urep = unksel ? 0x01000000 : 0x02000000;
+	u32 ctrl, timeout;
+
+	/* wait up to 1ms for any previous transaction to be done... */
+	timeout = 1000;
+	do {
+		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR("begin idle timeout 0x%08x", ctrl);
+			return -EBUSY;
+		}
+	} while (ctrl & 0x03010000);
+
+	/* set some magic, and wait up to 1ms for it to appear */
+	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
+	timeout = 1000;
+	do {
+		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR("magic wait 0x%08x\n", ctrl);
+			auxch_fini(aux, ch);
+			return -EBUSY;
+		}
+	} while ((ctrl & 0x03000000) != urep);
+
+	return 0;
+}
+
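+/* perform a single aux transaction of up to 16 bytes; bit 0 of 'type'
+ * selects read (data copied out) vs write (data copied in)
+ */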
+static int
+auxch_tx(struct nouveau_i2c *aux, int ch, u8 type, u32 addr, u8 *data, u8 size)
+{
+	u32 ctrl, stat, timeout, retries;
+	u32 xbuf[4] = {};
+	int ret, i;
+
+	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+
+	ret = auxch_init(aux, ch);
+	if (ret)
+		goto out;
+
+	stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
+	if (!(stat & 0x10000000)) {
+		AUX_DBG("sink not detected\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!(type & 1)) {
+		memcpy(xbuf, data, size);
+		for (i = 0; i < 16; i += 4) {
+			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+			nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
+		}
+	}
+
+	ctrl  = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+	ctrl &= ~0x0001f0ff;
+	ctrl |= type << 12;
+	ctrl |= size - 1;
+	nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
+
+	/* retry transaction a number of times on failure... */
+	ret = -EREMOTEIO;
+	for (retries = 0; retries < 32; retries++) {
+		/* reset, and delay a while if this is a retry */
+		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
+		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
+		if (retries)
+			udelay(400);
+
+		/* transaction request, wait up to 1ms for it to complete */
+		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
+
+		timeout = 1000;
+		do {
+			ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+			udelay(1);
+			if (!timeout--) {
+				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+				goto out;
+			}
+		} while (ctrl & 0x00010000);
+
+		/* read status, and check if transaction completed ok */
+		stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
+		if (!(stat & 0x000f0f00)) {
+			ret = 0;
+			break;
+		}
+
+		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+	}
+
+	if (type & 1) {
+		for (i = 0; i < 16; i += 4) {
+			xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
+			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+		}
+		memcpy(data, xbuf, size);
+	}
+
+out:
+	auxch_fini(aux, ch);
+	return ret;
+}
+
+int
+nv_rdaux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
+{
+	return auxch_tx(auxch->i2c, auxch->drive, 9, addr, data, size);
+}
+
+int
+nv_wraux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
+{
+	return auxch_tx(auxch->i2c, auxch->drive, 8, addr, data, size);
+}
+
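+/* split i2c-over-aux transfers into chunks of at most 16 bytes, setting the
+ * MOT bit on every chunk except the last of the final message
+ */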
+static int
+aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	struct nouveau_i2c_port *auxch = (struct nouveau_i2c_port *)adap;
+	struct i2c_msg *msg = msgs;
+	int ret, mcnt = num;
+
+	while (mcnt--) {
+		u8 remaining = msg->len;
+		u8 *ptr = msg->buf;
+
+		while (remaining) {
+			u8 cnt = (remaining > 16) ? 16 : remaining;
+			u8 cmd;
+
+			if (msg->flags & I2C_M_RD)
+				cmd = 1;
+			else
+				cmd = 0;
+
+			if (mcnt || remaining > 16)
+				cmd |= 4; /* MOT */
+
+			ret = auxch_tx(auxch->i2c, auxch->drive, cmd,
+				       msg->addr, ptr, cnt);
+			if (ret < 0)
+				return ret;
+
+			ptr += cnt;
+			remaining -= cnt;
+		}
+
+		msg++;
+	}
+
+	return num;
+}
+
+static u32
+aux_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm nouveau_i2c_aux_algo = {
+	.master_xfer = aux_xfer,
+	.functionality = aux_func
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
new file mode 100644
index 000000000000..3d2c88310f98
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "core/option.h"
+
+#include "subdev/i2c.h"
+#include "subdev/vga.h"
+
+int
+nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
+{
+	u8 val;
+	struct i2c_msg msgs[] = {
+		{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
+	};
+
+	int ret = i2c_transfer(&port->adapter, msgs, 2);
+	if (ret != 2)
+		return -EIO;
+
+	return val;
+}
+
+int
+nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
+{
+	struct i2c_msg msgs[] = {
+		{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = addr, .flags = 0, .len = 1, .buf = &val },
+	};
+
+	int ret = i2c_transfer(&port->adapter, msgs, 2);
+	if (ret != 2)
+		return -EIO;
+
+	return 0;
+}
+
+bool
+nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
+{
+	u8 buf[] = { 0 };
+	struct i2c_msg msgs[] = {
+		{
+			.addr = addr,
+			.flags = 0,
+			.len = 1,
+			.buf = buf,
+		},
+		{
+			.addr = addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = buf,
+		}
+	};
+
+	return i2c_transfer(&port->adapter, msgs, 2) == 2;
+}
+
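+/* resolve NV_I2C_DEFAULT() indices via the DCB i2c table (falling back to
+ * bus 2), look the port up, and set up nv50+ pad routing where required
+ */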
+static struct nouveau_i2c_port *
+nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
+{
+	struct nouveau_bios *bios = nouveau_bios(i2c);
+	struct nouveau_i2c_port *port;
+
+	if (index == NV_I2C_DEFAULT(0) ||
+	    index == NV_I2C_DEFAULT(1)) {
+		u8  ver, hdr, cnt, len;
+		u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
+		if (i2c && ver >= 0x30) {
+			u8 auxidx = nv_ro08(bios, i2c + 4);
+			if (index == NV_I2C_DEFAULT(0))
+				index = (auxidx & 0x0f) >> 0;
+			else
+				index = (auxidx & 0xf0) >> 4;
+		} else {
+			index = 2;
+		}
+	}
+
+	list_for_each_entry(port, &i2c->ports, head) {
+		if (port->index == index)
+			break;
+	}
+
+	if (&port->head == &i2c->ports)
+		return NULL;
+
+	if (nv_device(i2c)->card_type >= NV_50 && (port->dcb & 0x00000100)) {
+		u32 reg = 0x00e500, val;
+		if (port->type == 6) {
+			reg += port->drive * 0x50;
+			val  = 0x2002;
+		} else {
+			reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
+			val  = 0xe001;
+		}
+
+		/* nfi, but neither auxch nor i2c works if it's 1 */
+		nv_mask(i2c, reg + 0x0c, 0x00000001, 0x00000000);
+		/* nfi, but switches auxch vs normal i2c */
+		nv_mask(i2c, reg + 0x00, 0x0000f003, val);
+	}
+
+	return port;
+}
+
+static int
+nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
+		     struct i2c_board_info *info,
+		     bool (*match)(struct nouveau_i2c_port *,
+				   struct i2c_board_info *))
+{
+	struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
+	int i;
+
+	if (!port) {
+		nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
+		return -ENODEV;
+	}
+
+	nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
+	for (i = 0; info[i].addr; i++) {
+		if (nv_probe_i2c(port, info[i].addr) &&
+		    (!match || match(port, &info[i]))) {
+			nv_info(i2c, "detected %s: %s\n", what, info[i].type);
+			return i;
+		}
+	}
+
+	nv_debug(i2c, "no devices found.\n");
+	return -ENODEV;
+}
+
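+/* bit-banging callbacks; the register layout depends on the DCB port type */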
+void
+nouveau_i2c_drive_scl(void *data, int state)
+{
+	struct nouveau_i2c_port *port = data;
+
+	if (port->type == DCB_I2C_NV04_BIT) {
+		u8 val = nv_rdvgac(port->i2c, 0, port->drive);
+		if (state) val |= 0x20;
+		else	   val &= 0xdf;
+		nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
+	} else
+	if (port->type == DCB_I2C_NV4E_BIT) {
+		nv_mask(port->i2c, port->drive, 0x2f, state ? 0x21 : 0x01);
+	} else
+	if (port->type == DCB_I2C_NVIO_BIT) {
+		if (state) port->state |= 0x01;
+		else	   port->state &= 0xfe;
+		nv_wr32(port->i2c, port->drive, 4 | port->state);
+	}
+}
+
+void
+nouveau_i2c_drive_sda(void *data, int state)
+{
+	struct nouveau_i2c_port *port = data;
+
+	if (port->type == DCB_I2C_NV04_BIT) {
+		u8 val = nv_rdvgac(port->i2c, 0, port->drive);
+		if (state) val |= 0x10;
+		else	   val &= 0xef;
+		nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
+	} else
+	if (port->type == DCB_I2C_NV4E_BIT) {
+		nv_mask(port->i2c, port->drive, 0x1f, state ? 0x11 : 0x01);
+	} else
+	if (port->type == DCB_I2C_NVIO_BIT) {
+		if (state) port->state |= 0x02;
+		else	   port->state &= 0xfd;
+		nv_wr32(port->i2c, port->drive, 4 | port->state);
+	}
+}
+
+int
+nouveau_i2c_sense_scl(void *data)
+{
+	struct nouveau_i2c_port *port = data;
+	struct nouveau_device *device = nv_device(port->i2c);
+
+	if (port->type == DCB_I2C_NV04_BIT) {
+		return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x04);
+	} else
+	if (port->type == DCB_I2C_NV4E_BIT) {
+		return !!(nv_rd32(port->i2c, port->sense) & 0x00040000);
+	} else
+	if (port->type == DCB_I2C_NVIO_BIT) {
+		if (device->card_type < NV_D0)
+			return !!(nv_rd32(port->i2c, port->sense) & 0x01);
+		else
+			return !!(nv_rd32(port->i2c, port->sense) & 0x10);
+	}
+
+	return 0;
+}
+
+int
+nouveau_i2c_sense_sda(void *data)
+{
+	struct nouveau_i2c_port *port = data;
+	struct nouveau_device *device = nv_device(port->i2c);
+
+	if (port->type == DCB_I2C_NV04_BIT) {
+		return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x08);
+	} else
+	if (port->type == DCB_I2C_NV4E_BIT) {
+		return !!(nv_rd32(port->i2c, port->sense) & 0x00080000);
+	} else
+	if (port->type == DCB_I2C_NVIO_BIT) {
+		if (device->card_type < NV_D0)
+			return !!(nv_rd32(port->i2c, port->sense) & 0x02);
+		else
+			return !!(nv_rd32(port->i2c, port->sense) & 0x20);
+	}
+
+	return 0;
+}
+
+static const u32 nv50_i2c_port[] = {
+	0x00e138, 0x00e150, 0x00e168, 0x00e180,
+	0x00e254, 0x00e274, 0x00e764, 0x00e780,
+	0x00e79c, 0x00e7b8
+};
+
+static int
+nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nouveau_i2c_port *port;
+	struct nouveau_i2c *i2c;
+	struct dcb_i2c_entry info;
+	int ret, i = -1;
+
+	ret = nouveau_subdev_create(parent, engine, oclass, 0,
+				    "I2C", "i2c", &i2c);
+	*pobject = nv_object(i2c);
+	if (ret)
+		return ret;
+
+	i2c->find = nouveau_i2c_find;
+	i2c->identify = nouveau_i2c_identify;
+	INIT_LIST_HEAD(&i2c->ports);
+
+	while (!dcb_i2c_parse(bios, ++i, &info)) {
+		if (info.type == DCB_I2C_UNUSED)
+			continue;
+
+		port = kzalloc(sizeof(*port), GFP_KERNEL);
+		if (!port) {
+			nv_error(i2c, "failed port memory alloc at %d\n", i);
+			break;
+		}
+
+		port->type = info.type;
+		switch (port->type) {
+		case DCB_I2C_NV04_BIT:
+			port->drive = info.drive;
+			port->sense = info.sense;
+			break;
+		case DCB_I2C_NV4E_BIT:
+			port->drive = 0x600800 + info.drive;
+			port->sense = port->drive;
+			break;
+		case DCB_I2C_NVIO_BIT:
+			port->drive = info.drive & 0x0f;
+			if (device->card_type < NV_D0) {
+				if (info.drive >= ARRAY_SIZE(nv50_i2c_port))
+					break;
+				port->drive = nv50_i2c_port[port->drive];
+				port->sense = port->drive;
+			} else {
+				port->drive = 0x00d014 + (port->drive * 0x20);
+				port->sense = port->drive;
+			}
+			break;
+		case DCB_I2C_NVIO_AUX:
+			port->drive = info.drive & 0x0f;
+			port->sense = port->drive;
+			port->adapter.algo = &nouveau_i2c_aux_algo;
+			break;
+		default:
+			break;
+		}
+
+		if (!port->adapter.algo && !port->drive) {
+			nv_error(i2c, "I2C%d: type %d index %x/%x unknown\n",
+				 i, port->type, port->drive, port->sense);
+			kfree(port);
+			continue;
+		}
+
+		snprintf(port->adapter.name, sizeof(port->adapter.name),
+			 "nouveau-%s-%d", device->name, i);
+		port->adapter.owner = THIS_MODULE;
+		port->adapter.dev.parent = &device->pdev->dev;
+		port->i2c = i2c;
+		port->index = i;
+		port->dcb = info.data;
+		i2c_set_adapdata(&port->adapter, i2c);
+
+		if (port->adapter.algo != &nouveau_i2c_aux_algo) {
+			nouveau_i2c_drive_scl(port, 0);
+			nouveau_i2c_drive_sda(port, 1);
+			nouveau_i2c_drive_scl(port, 1);
+
+#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+			if (nouveau_boolopt(device->cfgopt, "NvI2C", true)) {
+#else
+			if (nouveau_boolopt(device->cfgopt, "NvI2C", false)) {
+#endif
+				port->adapter.algo = &nouveau_i2c_bit_algo;
+				ret = i2c_add_adapter(&port->adapter);
+			} else {
+				port->adapter.algo_data = &port->bit;
+				port->bit.udelay = 10;
+				port->bit.timeout = usecs_to_jiffies(2200);
+				port->bit.data = port;
+				port->bit.setsda = nouveau_i2c_drive_sda;
+				port->bit.setscl = nouveau_i2c_drive_scl;
+				port->bit.getsda = nouveau_i2c_sense_sda;
+				port->bit.getscl = nouveau_i2c_sense_scl;
+				ret = i2c_bit_add_bus(&port->adapter);
+			}
+		} else {
+			port->adapter.algo = &nouveau_i2c_aux_algo;
+			ret = i2c_add_adapter(&port->adapter);
+		}
+
+		if (ret) {
+			nv_error(i2c, "I2C%d: failed register: %d\n", i, ret);
+			kfree(port);
+			continue;
+		}
+
+		list_add_tail(&port->head, &i2c->ports);
+	}
+
+	return 0;
+}
+
+static void
+nouveau_i2c_dtor(struct nouveau_object *object)
+{
+	struct nouveau_i2c *i2c = (void *)object;
+	struct nouveau_i2c_port *port, *temp;
+
+	list_for_each_entry_safe(port, temp, &i2c->ports, head) {
+		i2c_del_adapter(&port->adapter);
+		list_del(&port->head);
+		kfree(port);
+	}
+
+	nouveau_subdev_destroy(&i2c->base);
+}
+
+static int
+nouveau_i2c_init(struct nouveau_object *object)
+{
+	struct nouveau_i2c *i2c = (void *)object;
+	return nouveau_subdev_init(&i2c->base);
+}
+
+static int
+nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_i2c *i2c = (void *)object;
+	return nouveau_subdev_fini(&i2c->base, suspend);
+}
+
+struct nouveau_oclass
+nouveau_i2c_oclass = {
+	.handle = NV_SUBDEV(I2C, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nouveau_i2c_ctor,
+		.dtor = nouveau_i2c_dtor,
+		.init = nouveau_i2c_init,
+		.fini = nouveau_i2c_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
new file mode 100644
index 000000000000..1c4c9a5c8e2e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "subdev/i2c.h"
+
+#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
+#define T_TIMEOUT  2200000
+#define T_RISEFALL 1000
+#define T_HOLD     5000
+
+static inline void
+i2c_drive_scl(struct nouveau_i2c_port *port, int state)
+{
+	nouveau_i2c_drive_scl(port, state);
+}
+
+static inline void
+i2c_drive_sda(struct nouveau_i2c_port *port, int state)
+{
+	nouveau_i2c_drive_sda(port, state);
+}
+
+static inline int
+i2c_sense_scl(struct nouveau_i2c_port *port)
+{
+	return nouveau_i2c_sense_scl(port);
+}
+
+static inline int
+i2c_sense_sda(struct nouveau_i2c_port *port)
+{
+	return nouveau_i2c_sense_sda(port);
+}
+
+static void
+i2c_delay(struct nouveau_i2c_port *port, u32 nsec)
+{
+	udelay((nsec + 500) / 1000);
+}
+
+static bool
+i2c_raise_scl(struct nouveau_i2c_port *port)
+{
+	u32 timeout = T_TIMEOUT / T_RISEFALL;
+
+	i2c_drive_scl(port, 1);
+	do {
+		i2c_delay(port, T_RISEFALL);
+	} while (!i2c_sense_scl(port) && --timeout);
+
+	return timeout != 0;
+}
+
+static int
+i2c_start(struct nouveau_i2c_port *port)
+{
+	int ret = 0;
+
+	port->state  = i2c_sense_scl(port);
+	port->state |= i2c_sense_sda(port) << 1;
+	if (port->state != 3) {
+		i2c_drive_scl(port, 0);
+		i2c_drive_sda(port, 1);
+		if (!i2c_raise_scl(port))
+			ret = -EBUSY;
+	}
+
+	i2c_drive_sda(port, 0);
+	i2c_delay(port, T_HOLD);
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return ret;
+}
+
+static void
+i2c_stop(struct nouveau_i2c_port *port)
+{
+	i2c_drive_scl(port, 0);
+	i2c_drive_sda(port, 0);
+	i2c_delay(port, T_RISEFALL);
+
+	i2c_drive_scl(port, 1);
+	i2c_delay(port, T_HOLD);
+	i2c_drive_sda(port, 1);
+	i2c_delay(port, T_HOLD);
+}
+
+static int
+i2c_bitw(struct nouveau_i2c_port *port, int sda)
+{
+	i2c_drive_sda(port, sda);
+	i2c_delay(port, T_RISEFALL);
+
+	if (!i2c_raise_scl(port))
+		return -ETIMEDOUT;
+	i2c_delay(port, T_HOLD);
+
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return 0;
+}
+
+static int
+i2c_bitr(struct nouveau_i2c_port *port)
+{
+	int sda;
+
+	i2c_drive_sda(port, 1);
+	i2c_delay(port, T_RISEFALL);
+
+	if (!i2c_raise_scl(port))
+		return -ETIMEDOUT;
+	i2c_delay(port, T_HOLD);
+
+	sda = i2c_sense_sda(port);
+
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return sda;
+}
+
+static int
+i2c_get_byte(struct nouveau_i2c_port *port, u8 *byte, bool last)
+{
+	int i, bit;
+
+	*byte = 0;
+	for (i = 7; i >= 0; i--) {
+		bit = i2c_bitr(port);
+		if (bit < 0)
+			return bit;
+		*byte |= bit << i;
+	}
+
+	return i2c_bitw(port, last ? 1 : 0);
+}
+
+static int
+i2c_put_byte(struct nouveau_i2c_port *port, u8 byte)
+{
+	int i, ret;
+	for (i = 7; i >= 0; i--) {
+		ret = i2c_bitw(port, !!(byte & (1 << i)));
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = i2c_bitr(port);
+	if (ret == 1) /* nack */
+		ret = -EIO;
+	return ret;
+}
+
+static int
+i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
+{
+	u32 addr = msg->addr << 1;
+	if (msg->flags & I2C_M_RD)
+		addr |= 1;
+	return i2c_put_byte(port, addr);
+}
+
+static int
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	struct nouveau_i2c_port *port = (struct nouveau_i2c_port *)adap;
+	struct i2c_msg *msg = msgs;
+	int ret = 0, mcnt = num;
+
+	while (!ret && mcnt--) {
+		u8 remaining = msg->len;
+		u8 *ptr = msg->buf;
+
+		ret = i2c_start(port);
+		if (ret == 0)
+			ret = i2c_addr(port, msg);
+
+		if (msg->flags & I2C_M_RD) {
+			while (!ret && remaining--)
+				ret = i2c_get_byte(port, ptr++, !remaining);
+		} else {
+			while (!ret && remaining--)
+				ret = i2c_put_byte(port, *ptr++);
+		}
+
+		msg++;
+	}
+
+	i2c_stop(port);
+	return (ret < 0) ? ret : num;
+}
+#else
+static int
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	return -ENODEV;
+}
+#endif
+
+static u32
+i2c_bit_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm nouveau_i2c_bit_algo = {
+	.master_xfer = i2c_bit_xfer,
+	.functionality = i2c_bit_func
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
new file mode 100644
index 000000000000..4e977ff27e44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/ibus.h>
+
+struct nvc0_ibus_priv {
+	struct nouveau_ibus base;
+};
+
+static void
+nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
+	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
+	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
+	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
+}
+
+static void
+nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
+	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
+	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
+	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
+}
+
+static void
+nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
+	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
+	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
+	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
+}
+
+static void
+nvc0_ibus_intr(struct nouveau_subdev *subdev)
+{
+	struct nvc0_ibus_priv *priv = (void *)subdev;
+	u32 intr0 = nv_rd32(priv, 0x121c58);
+	u32 intr1 = nv_rd32(priv, 0x121c5c);
+	u32 hubnr = nv_rd32(priv, 0x121c70);
+	u32 ropnr = nv_rd32(priv, 0x121c74);
+	u32 gpcnr = nv_rd32(priv, 0x121c78);
+	u32 i;
+
+	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
+		u32 stat = 0x00000100 << i;
+		if (intr0 & stat) {
+			nvc0_ibus_intr_hub(priv, i);
+			intr0 &= ~stat;
+		}
+	}
+
+	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
+		u32 stat = 0x00010000 << i;
+		if (intr0 & stat) {
+			nvc0_ibus_intr_rop(priv, i);
+			intr0 &= ~stat;
+		}
+	}
+
+	for (i = 0; intr1 && i < gpcnr; i++) {
+		u32 stat = 0x00000001 << i;
+		if (intr1 & stat) {
+			nvc0_ibus_intr_gpc(priv, i);
+			intr1 &= ~stat;
+		}
+	}
+}
+
+static int
+nvc0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nvc0_ibus_priv *priv;
+	int ret;
+
+	ret = nouveau_ibus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nvc0_ibus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_ibus_oclass = {
+	.handle = NV_SUBDEV(IBUS, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_ibus_ctor,
+		.dtor = _nouveau_ibus_dtor,
+		.init = _nouveau_ibus_init,
+		.fini = _nouveau_ibus_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
new file mode 100644
index 000000000000..7120124dceac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/ibus.h>
+
+struct nve0_ibus_priv {
+	struct nouveau_ibus base;
+};
+
+static void
+nve0_ibus_intr_hub(struct nve0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
+	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
+	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800));
+	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
+}
+
+static void
+nve0_ibus_intr_rop(struct nve0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
+	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
+	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800));
+	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
+}
+
+static void
+nve0_ibus_intr_gpc(struct nve0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
+	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
+	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800));
+	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
+}
+
+static void
+nve0_ibus_intr(struct nouveau_subdev *subdev)
+{
+	struct nve0_ibus_priv *priv = (void *)subdev;
+	u32 intr0 = nv_rd32(priv, 0x120058);
+	u32 intr1 = nv_rd32(priv, 0x12005c);
+	u32 hubnr = nv_rd32(priv, 0x120070);
+	u32 ropnr = nv_rd32(priv, 0x120074);
+	u32 gpcnr = nv_rd32(priv, 0x120078);
+	u32 i;
+
+	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
+		u32 stat = 0x00000100 << i;
+		if (intr0 & stat) {
+			nve0_ibus_intr_hub(priv, i);
+			intr0 &= ~stat;
+		}
+	}
+
+	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
+		u32 stat = 0x00010000 << i;
+		if (intr0 & stat) {
+			nve0_ibus_intr_rop(priv, i);
+			intr0 &= ~stat;
+		}
+	}
+
+	for (i = 0; intr1 && i < gpcnr; i++) {
+		u32 stat = 0x00000001 << i;
+		if (intr1 & stat) {
+			nve0_ibus_intr_gpc(priv, i);
+			intr1 &= ~stat;
+		}
+	}
+}
+
+static int
+nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nve0_ibus_priv *priv;
+	int ret;
+
+	ret = nouveau_ibus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nve0_ibus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_ibus_oclass = {
+	.handle = NV_SUBDEV(IBUS, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_ibus_ctor,
+		.dtor = _nouveau_ibus_dtor,
+		.init = _nouveau_ibus_init,
+		.fini = _nouveau_ibus_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
new file mode 100644
index 000000000000..1188227ca6aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/instmem.h>
+
+int
+nouveau_instobj_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int length, void **pobject)
+{
+	struct nouveau_instmem *imem = (void *)engine;
+	struct nouveau_instobj *iobj;
+	int ret;
+
+	ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
+				     length, pobject);
+	iobj = *pobject;
+	if (ret)
+		return ret;
+
+	list_add(&iobj->head, &imem->list);
+	return 0;
+}
+
+void
+nouveau_instobj_destroy(struct nouveau_instobj *iobj)
+{
+	if (iobj->head.prev)
+		list_del(&iobj->head);
+	return nouveau_object_destroy(&iobj->base);
+}
+
+void
+_nouveau_instobj_dtor(struct nouveau_object *object)
+{
+	struct nouveau_instobj *iobj = (void *)object;
+	return nouveau_instobj_destroy(iobj);
+}
+
+int
+nouveau_instmem_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int length, void **pobject)
+{
+	struct nouveau_instmem *imem;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0,
+				     "INSTMEM", "instmem", length, pobject);
+	imem = *pobject;
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&imem->list);
+	return 0;
+}
+
+int
+nouveau_instmem_init(struct nouveau_instmem *imem)
+{
+	struct nouveau_instobj *iobj;
+	int ret, i;
+
+	ret = nouveau_subdev_init(&imem->base);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(iobj, &imem->list, head) {
+		if (iobj->suspend) {
+			for (i = 0; i < iobj->size; i += 4)
+				nv_wo32(iobj, i, iobj->suspend[i / 4]);
+			vfree(iobj->suspend);
+			iobj->suspend = NULL;
+		}
+	}
+
+	return 0;
+}
+
+int
+nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
+{
+	struct nouveau_instobj *iobj;
+	int i;
+
+	if (suspend) {
+		list_for_each_entry(iobj, &imem->list, head) {
+			iobj->suspend = vmalloc(iobj->size);
+			if (iobj->suspend) {
+				for (i = 0; i < iobj->size; i += 4)
+					iobj->suspend[i / 4] = nv_ro32(iobj, i);
+			} else
+				return -ENOMEM;
+		}
+	}
+
+	return nouveau_subdev_fini(&imem->base, suspend);
+}
+
+int
+_nouveau_instmem_init(struct nouveau_object *object)
+{
+	struct nouveau_instmem *imem = (void *)object;
+	return nouveau_instmem_init(imem);
+}
+
+int
+_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_instmem *imem = (void *)object;
+	return nouveau_instmem_fini(imem, suspend);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
new file mode 100644
index 000000000000..ba4d28b50368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/fb.h>
+
+#include "nv04.h"
+
+static int
+nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *priv = (void *)engine;
+	struct nv04_instobj_priv *node;
+	int ret, align;
+
+	align = (unsigned long)data;
+	if (!align)
+		align = 1;
+
+	ret = nouveau_instobj_create(parent, engine, oclass, &node);
+	*pobject = nv_object(node);
+	if (ret)
+		return ret;
+
+	ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
+	if (ret)
+		return ret;
+
+	node->base.addr = node->mem->offset;
+	node->base.size = node->mem->length;
+	return 0;
+}
+
+static void
+nv04_instobj_dtor(struct nouveau_object *object)
+{
+	struct nv04_instmem_priv *priv = (void *)object->engine;
+	struct nv04_instobj_priv *node = (void *)object;
+	nouveau_mm_free(&priv->heap, &node->mem);
+	nouveau_instobj_destroy(&node->base);
+}
+
+static u32
+nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
+{
+	struct nv04_instobj_priv *node = (void *)object;
+	return nv_ro32(object->engine, node->mem->offset + addr);
+}
+
+static void
+nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	struct nv04_instobj_priv *node = (void *)object;
+	nv_wo32(object->engine, node->mem->offset + addr, data);
+}
+
+static struct nouveau_oclass
+nv04_instobj_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_instobj_ctor,
+		.dtor = nv04_instobj_dtor,
+		.init = _nouveau_instobj_init,
+		.fini = _nouveau_instobj_fini,
+		.rd32 = nv04_instobj_rd32,
+		.wr32 = nv04_instobj_wr32,
+	},
+};
+
+int
+nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
+		   u32 size, u32 align, struct nouveau_object **pobject)
+{
+	struct nouveau_object *engine = nv_object(imem);
+	struct nv04_instmem_priv *priv = (void *)(imem);
+	int ret;
+
+	ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
+				  (void *)(unsigned long)align, size, pobject);
+	if (ret)
+		return ret;
+
+	/* INSTMEM itself creates objects to reserve (and preserve across
+	 * suspend/resume) various fixed data locations, each one of these
+	 * takes a reference on INSTMEM itself, causing it to never be
+	 * freed.  We drop all the self-references here to avoid this.
+	 */
+	if (unlikely(!priv->created))
+		atomic_dec(&engine->refcount);
+
+	return 0;
+}
+
+static int
+nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *priv;
+	int ret;
+
+	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* PRAMIN aperture maps over the end of VRAM; reserve it */
+	priv->base.reserved = 512 * 1024;
+	priv->base.alloc    = nv04_instmem_alloc;
+
+	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	if (ret)
+		return ret;
+
+	/* 0x00000-0x10000: reserve for probable vbios image */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
+	if (ret)
+		return ret;
+
+	/* 0x10000-0x18000: reserve for RAMHT */
+	ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
+	if (ret)
+		return ret;
+
+	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	if (ret)
+		return ret;
+
+	/* 0x18800-0x18a00: reserve for RAMRO */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
+	if (ret)
+		return ret;
+
+	priv->created = true;
+	return 0;
+}
+
+void
+nv04_instmem_dtor(struct nouveau_object *object)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->ramfc);
+	nouveau_gpuobj_ref(NULL, &priv->ramro);
+	nouveau_ramht_ref(NULL, &priv->ramht);
+	nouveau_gpuobj_ref(NULL, &priv->vbios);
+	nouveau_mm_fini(&priv->heap);
+	if (priv->iomem)
+		iounmap(priv->iomem);
+	nouveau_instmem_destroy(&priv->base);
+}
+
+static u32
+nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
+{
+	return nv_rd32(object, 0x700000 + addr);
+}
+
+static void
+nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	return nv_wr32(object, 0x700000 + addr, data);
+}
+
+struct nouveau_oclass
+nv04_instmem_oclass = {
+	.handle = NV_SUBDEV(INSTMEM, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_instmem_ctor,
+		.dtor = nv04_instmem_dtor,
+		.init = _nouveau_instmem_init,
+		.fini = _nouveau_instmem_fini,
+		.rd32 = nv04_instmem_rd32,
+		.wr32 = nv04_instmem_wr32,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
new file mode 100644
index 000000000000..7983d8d9b358
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -0,0 +1,39 @@
+#ifndef __NV04_INSTMEM_H__
+#define __NV04_INSTMEM_H__
+
+#include <core/gpuobj.h>
+#include <core/ramht.h>
+#include <core/mm.h>
+
+#include <subdev/instmem.h>
+
+struct nv04_instmem_priv {
+	struct nouveau_instmem base;
+	bool created;
+
+	void __iomem *iomem;
+	struct nouveau_mm heap;
+
+	struct nouveau_gpuobj *vbios;
+	struct nouveau_ramht  *ramht;
+	struct nouveau_gpuobj *ramro;
+	struct nouveau_gpuobj *ramfc;
+};
+
+static inline struct nv04_instmem_priv *
+nv04_instmem(void *obj)
+{
+	return (void *)nouveau_instmem(obj);
+}
+
+struct nv04_instobj_priv {
+	struct nouveau_instobj base;
+	struct nouveau_mm_node *mem;
+};
+
+void nv04_instmem_dtor(struct nouveau_object *);
+
+int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *,
+		       u32 size, u32 align, struct nouveau_object **pobject);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
new file mode 100644
index 000000000000..73c52ebd5932
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+static inline int
+nv44_graph_class(struct nv04_instmem_priv *priv)
+{
+	if ((nv_device(priv)->chipset & 0xf0) == 0x60)
+		return 1;
+	return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
+}
+
+static int
+nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct pci_dev *pdev = device->pdev;
+	struct nv04_instmem_priv *priv;
+	int ret, bar, vs;
+
+	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* map bar */
+	if (pci_resource_len(pdev, 2))
+		bar = 2;
+	else
+		bar = 3;
+
+	priv->iomem = ioremap(pci_resource_start(pdev, bar),
+			      pci_resource_len(pdev, bar));
+	if (!priv->iomem) {
+		nv_error(priv, "unable to map PRAMIN BAR\n");
+		return -EFAULT;
+	}
+
+	/* PRAMIN aperture maps over the end of VRAM; reserve enough space
+	 * to fit graphics contexts for every channel.  The magic numbers
+	 * come from engine/graph/nv40.c
+	 */
+	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
+	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
+	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
+	else if (nv44_graph_class(priv))  priv->base.reserved = 0x4980 * vs;
+	else				  priv->base.reserved = 0x4a40 * vs;
+	priv->base.reserved += 16 * 1024;
+	priv->base.reserved *= 32;		/* per-channel */
+	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
+	priv->base.reserved += 512 * 1024;	/* object storage */
+
+	priv->base.reserved = round_up(priv->base.reserved, 4096);
+	priv->base.alloc    = nv04_instmem_alloc;
+
+	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	if (ret)
+		return ret;
+
+	/* 0x00000-0x10000: reserve for probable vbios image */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
+	if (ret)
+		return ret;
+
+	/* 0x10000-0x18000: reserve for RAMHT */
+	ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
+	if (ret)
+		return ret;
+
+	/* 0x18000-0x18200: reserve for RAMRO
+	 * 0x18200-0x20000: padding
+	 */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
+	if (ret)
+		return ret;
+
+	/* 0x20000-0x21000: reserve for RAMFC
+	 * 0x21000-0x40000: padding and some unknown crap
+	 */
+	ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	if (ret)
+		return ret;
+
+	priv->created = true;
+	return 0;
+}
+
+static u32
+nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	return ioread32_native(priv->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	iowrite32_native(data, priv->iomem + addr);
+}
+
+struct nouveau_oclass
+nv40_instmem_oclass = {
+	.handle = NV_SUBDEV(INSTMEM, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_instmem_ctor,
+		.dtor = nv04_instmem_dtor,
+		.init = _nouveau_instmem_init,
+		.fini = _nouveau_instmem_fini,
+		.rd32 = nv40_instmem_rd32,
+		.wr32 = nv40_instmem_wr32,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
new file mode 100644
index 000000000000..27ef0891d10b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/instmem.h>
+#include <subdev/fb.h>
+
+#include <core/mm.h>
+
+struct nv50_instmem_priv {
+	struct nouveau_instmem base;
+	spinlock_t lock;
+	u64 addr;
+};
+
+struct nv50_instobj_priv {
+	struct nouveau_instobj base;
+	struct nouveau_mem *mem;
+};
+
+static int
+nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nv50_instobj_priv *node;
+	u32 align = (unsigned long)data;
+	int ret;
+
+	size  = max((size  + 4095) & ~4095, (u32)4096);
+	align = max((align + 4095) & ~4095, (u32)4096);
+
+	ret = nouveau_instobj_create(parent, engine, oclass, &node);
+	*pobject = nv_object(node);
+	if (ret)
+		return ret;
+
+	ret = pfb->ram.get(pfb, size, align, 0, 0x800, &node->mem);
+	if (ret)
+		return ret;
+
+	node->base.addr = node->mem->offset;
+	node->base.size = node->mem->size << 12;
+	node->mem->page_shift = 12;
+	return 0;
+}
+
+static void
+nv50_instobj_dtor(struct nouveau_object *object)
+{
+	struct nv50_instobj_priv *node = (void *)object;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	pfb->ram.put(pfb, &node->mem);
+	nouveau_instobj_destroy(&node->base);
+}
+
+static u32
+nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
+{
+	struct nv50_instmem_priv *priv = (void *)object->engine;
+	struct nv50_instobj_priv *node = (void *)object;
+	unsigned long flags;
+	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	u32 data;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (unlikely(priv->addr != base)) {
+		nv_wr32(priv, 0x001700, base >> 16);
+		priv->addr = base;
+	}
+	data = nv_rd32(priv, 0x700000 + addr);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return data;
+}
+
+static void
+nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
+{
+	struct nv50_instmem_priv *priv = (void *)object->engine;
+	struct nv50_instobj_priv *node = (void *)object;
+	unsigned long flags;
+	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (unlikely(priv->addr != base)) {
+		nv_wr32(priv, 0x001700, base >> 16);
+		priv->addr = base;
+	}
+	nv_wr32(priv, 0x700000 + addr, data);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static struct nouveau_oclass
+nv50_instobj_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_instobj_ctor,
+		.dtor = nv50_instobj_dtor,
+		.init = _nouveau_instobj_init,
+		.fini = _nouveau_instobj_fini,
+		.rd32 = nv50_instobj_rd32,
+		.wr32 = nv50_instobj_wr32,
+	},
+};
+
+static int
+nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
+		   u32 size, u32 align, struct nouveau_object **pobject)
+{
+	struct nouveau_object *engine = nv_object(imem);
+	return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
+				   (void *)(unsigned long)align, size, pobject);
+}
+
+static int
+nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv50_instmem_priv *priv;
+	int ret;
+
+	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&priv->lock);
+	priv->base.alloc = nv50_instmem_alloc;
+	return 0;
+}
+
+static int
+nv50_instmem_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_instmem_priv *priv = (void *)object;
+	priv->addr = ~0ULL;
+	return nouveau_instmem_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv50_instmem_oclass = {
+	.handle = NV_SUBDEV(INSTMEM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_instmem_ctor,
+		.dtor = _nouveau_instmem_dtor,
+		.init = _nouveau_instmem_init,
+		.fini = nv50_instmem_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
new file mode 100644
index 000000000000..078a2b9d6bd6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/ltcg.h>
+
+struct nvc0_ltcg_priv {
+	struct nouveau_ltcg base;
+	u32 subp_nr;
+};
+
+static void
+nvc0_ltcg_subp_isr(struct nvc0_ltcg_priv *priv, int unit, int subp)
+{
+	u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
+	u32 stat = nv_rd32(priv, subp_base + 0x020);
+
+	if (stat) {
+		nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", unit, subp, stat);
+		nv_wr32(priv, subp_base + 0x020, stat);
+	}
+}
+
+static void
+nvc0_ltcg_intr(struct nouveau_subdev *subdev)
+{
+	struct nvc0_ltcg_priv *priv = (void *)subdev;
+	u32 units;
+
+	units = nv_rd32(priv, 0x00017c);
+	while (units) {
+		u32 subp, unit = ffs(units) - 1;
+		for (subp = 0; subp < priv->subp_nr; subp++)
+			nvc0_ltcg_subp_isr(priv, unit, subp);
+		units &= ~(1 << unit);
+	}
+
+	/* we do something horribly wrong and upset PMFB a lot, so mask off
+	 * interrupts from it after the first one until it's fixed
+	 */
+	nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
+}
+
+static int
+nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nvc0_ltcg_priv *priv;
+	int ret;
+
+	ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 24;
+	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+
+	nv_subdev(priv)->intr = nvc0_ltcg_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_ltcg_oclass = {
+	.handle = NV_SUBDEV(LTCG, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_ltcg_ctor,
+		.dtor = _nouveau_ltcg_dtor,
+		.init = _nouveau_ltcg_init,
+		.fini = _nouveau_ltcg_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
new file mode 100644
index 000000000000..de5721cfc4c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+void
+nouveau_mc_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_mc *pmc = nouveau_mc(subdev);
+	const struct nouveau_mc_intr *map = pmc->intr_map;
+	struct nouveau_subdev *unit;
+	u32 stat;
+
+	stat = nv_rd32(pmc, 0x000100);
+	while (stat && map->stat) {
+		if (stat & map->stat) {
+			unit = nouveau_subdev(subdev, map->unit);
+			if (unit && unit->intr)
+				unit->intr(unit);
+			stat &= ~map->stat;
+		}
+		map++;
+	}
+
+	if (stat) {
+		nv_error(pmc, "unknown intr 0x%08x\n", stat);
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
new file mode 100644
index 000000000000..23ebe477a6f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nv04_mc_priv {
+	struct nouveau_mc base;
+};
+
+const struct nouveau_mc_intr
+nv04_mc_intr[] = {
+	{ 0x00000001, NVDEV_ENGINE_MPEG },	/* NV17- MPEG/ME */
+	{ 0x00000100, NVDEV_ENGINE_FIFO },
+	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV40- */
+	{ 0x00100000, NVDEV_SUBDEV_TIMER },
+	{ 0x01000000, NVDEV_ENGINE_DISP },	/* NV04- PCRTC0 */
+	{ 0x02000000, NVDEV_ENGINE_DISP },	/* NV11- PCRTC1 */
+	{ 0x10000000, NVDEV_SUBDEV_GPIO },	/* PBUS */
+	{ 0x80000000, NVDEV_ENGINE_SW },
+	{}
+};
+
+static int
+nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv04_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nouveau_mc_intr;
+	priv->base.intr_map = nv04_mc_intr;
+	return 0;
+}
+
+int
+nv04_mc_init(struct nouveau_object *object)
+{
+	struct nv04_mc_priv *priv = (void *)object;
+
+	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+	nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
+
+	return nouveau_mc_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv04_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
new file mode 100644
index 000000000000..397d868359ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nv44_mc_priv {
+	struct nouveau_mc base;
+};
+
+static int
+nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv44_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nouveau_mc_intr;
+	priv->base.intr_map = nv04_mc_intr;
+	return 0;
+}
+
+static int
+nv44_mc_init(struct nouveau_object *object)
+{
+	struct nv44_mc_priv *priv = (void *)object;
+	u32 tmp = nv_rd32(priv, 0x10020c);
+
+	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+
+	nv_wr32(priv, 0x001700, tmp);
+	nv_wr32(priv, 0x001704, 0);
+	nv_wr32(priv, 0x001708, 0);
+	nv_wr32(priv, 0x00170c, tmp);
+
+	return nouveau_mc_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv44_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv44_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
new file mode 100644
index 000000000000..cedf33b02977
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nv50_mc_priv {
+	struct nouveau_mc base;
+};
+
+static const struct nouveau_mc_intr
+nv50_mc_intr[] = {
+	{ 0x00000001, NVDEV_ENGINE_MPEG },
+	{ 0x00000100, NVDEV_ENGINE_FIFO },
+	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00004000, NVDEV_ENGINE_CRYPT },	/* NV84- */
+	{ 0x00008000, NVDEV_ENGINE_BSP },	/* NV84- */
+	{ 0x00100000, NVDEV_SUBDEV_TIMER },
+	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x80000000, NVDEV_ENGINE_SW },
+	{},
+};
+
+static int
+nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv50_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nouveau_mc_intr;
+	priv->base.intr_map = nv50_mc_intr;
+	return 0;
+}
+
+int
+nv50_mc_init(struct nouveau_object *object)
+{
+	struct nv50_mc_priv *priv = (void *)object;
+	nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
+	return nouveau_mc_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv50_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
new file mode 100644
index 000000000000..a001e4c4d38d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nv98_mc_priv {
+	struct nouveau_mc base;
+};
+
+static const struct nouveau_mc_intr
+nv98_mc_intr[] = {
+	{ 0x00000001, NVDEV_ENGINE_PPP },
+	{ 0x00000100, NVDEV_ENGINE_FIFO },
+	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00004000, NVDEV_ENGINE_CRYPT },	/* NV84:NVA3 */
+	{ 0x00008000, NVDEV_ENGINE_BSP },
+	{ 0x00100000, NVDEV_SUBDEV_TIMER },
+	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x00400000, NVDEV_ENGINE_COPY0 },	/* NVA3-     */
+	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x80000000, NVDEV_ENGINE_SW },
+	{},
+};
+
+static int
+nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv98_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nouveau_mc_intr;
+	priv->base.intr_map = nv98_mc_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv98_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
new file mode 100644
index 000000000000..c2b81e30a17d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nvc0_mc_priv {
+	struct nouveau_mc base;
+};
+
+static const struct nouveau_mc_intr
+nvc0_mc_intr[] = {
+	{ 0x00000001, NVDEV_ENGINE_PPP },
+	{ 0x00000020, NVDEV_ENGINE_COPY0 },
+	{ 0x00000040, NVDEV_ENGINE_COPY1 },
+	{ 0x00000100, NVDEV_ENGINE_FIFO },
+	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00008000, NVDEV_ENGINE_BSP },
+	{ 0x00100000, NVDEV_SUBDEV_TIMER },
+	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x02000000, NVDEV_SUBDEV_LTCG },
+	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x40000000, NVDEV_SUBDEV_IBUS },
+	{ 0x80000000, NVDEV_ENGINE_SW },
+	{},
+};
+
+static int
+nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nvc0_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nouveau_mc_intr;
+	priv->base.intr_map = nvc0_mc_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
new file mode 100644
index 000000000000..93e3ddf7303a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+
+#include <subdev/i2c.h>
+#include <subdev/mxm.h>
+#include <subdev/bios.h>
+#include <subdev/bios/mxm.h>
+
+#include "mxms.h"
+
+static bool
+mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr,
+		     u8 offset, u8 size, u8 *data)
+{
+	struct i2c_msg msgs[] = {
+		{ .addr = addr, .flags = 0, .len = 1, .buf = &offset },
+		{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
+	};
+
+	return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+static bool
+mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version)
+{
+	struct nouveau_bios *bios = nouveau_bios(mxm);
+	struct nouveau_i2c *i2c = nouveau_i2c(mxm);
+	struct nouveau_i2c_port *port = NULL;
+	u8 i2cidx, mxms[6], addr, size;
+
+	i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
+	if (i2cidx < 0x0f)
+		port = i2c->find(i2c, i2cidx);
+	if (!port)
+		return false;
+
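+	/* read the six-byte MXMS header first to learn the full structure
+	 * size, probing the two addresses the shadow ROM may respond at
+	 */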
+	addr = 0x54;
+	if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) {
+		addr = 0x56;
+		if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms))
+			return false;
+	}
+
+	mxm->mxms = mxms;
+	size = mxms_headerlen(mxm) + mxms_structlen(mxm);
+	mxm->mxms = kmalloc(size, GFP_KERNEL);
+
+	if (mxm->mxms &&
+	    mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms))
+		return true;
+
+	kfree(mxm->mxms);
+	mxm->mxms = NULL;
+	return false;
+}
+
+#if defined(CONFIG_ACPI)
+static bool
+mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
+{
+	struct nouveau_device *device = nv_device(mxm);
+	static char muid[] = {
+		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
+		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
+	};
+	u32 mxms_args[] = { 0x00000000 };
+	union acpi_object args[4] = {
+		/* _DSM MUID */
+		{ .buffer.type = 3,
+		  .buffer.length = sizeof(muid),
+		  .buffer.pointer = muid,
+		},
+		/* spec says this can be zero to mean "highest revision", but
+		 * of course there's at least one bios out there which fails
+ * unless you pass in exactly the version it supports.
+		 */
+		{ .integer.type = ACPI_TYPE_INTEGER,
+		  .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
+		},
+		/* MXMS function */
+		{ .integer.type = ACPI_TYPE_INTEGER,
+		  .integer.value = 0x00000010,
+		},
+		/* Pointer to MXMS arguments */
+		{ .buffer.type = ACPI_TYPE_BUFFER,
+		  .buffer.length = sizeof(mxms_args),
+		  .buffer.pointer = (char *)mxms_args,
+		},
+	};
+	struct acpi_object_list list = { ARRAY_SIZE(args), args };
+	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_handle handle;
+	int ret;
+
+	handle = DEVICE_ACPI_HANDLE(&device->pdev->dev);
+	if (!handle)
+		return false;
+
+	ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
+	if (ret) {
+		nv_debug(mxm, "DSM MXMS failed: %d\n", ret);
+		return false;
+	}
+
+	obj = retn.pointer;
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		mxm->mxms = kmemdup(obj->buffer.pointer,
+					 obj->buffer.length, GFP_KERNEL);
+	} else
+	if (obj->type == ACPI_TYPE_INTEGER) {
+		nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+	}
+
+	kfree(obj);
+	return mxm->mxms != NULL;
+}
+#endif
+
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+
+static u8
+wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version)
+{
+	u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
+	struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
+	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_status status;
+
+	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+	if (ACPI_FAILURE(status)) {
+		nv_debug(mxm, "WMMX MXMI returned %d\n", status);
+		return 0x00;
+	}
+
+	obj = retn.pointer;
+	if (obj->type == ACPI_TYPE_INTEGER) {
+		version = obj->integer.value;
+		nv_debug(mxm, "WMMX MXMI version %d.%d\n",
+			     (version >> 4), version & 0x0f);
+	} else {
+		version = 0;
+		nv_debug(mxm, "WMMX MXMI returned non-integer\n");
+	}
+
+	kfree(obj);
+	return version;
+}
+
+static bool
+mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version)
+{
+	u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
+	struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
+	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_status status;
+
+	if (!wmi_has_guid(WMI_WMMX_GUID)) {
+		nv_debug(mxm, "WMMX GUID not found\n");
+		return false;
+	}
+
+	mxms_args[1] = wmi_wmmx_mxmi(mxm, 0x00);
+	if (!mxms_args[1])
+		mxms_args[1] = wmi_wmmx_mxmi(mxm, version);
+	if (!mxms_args[1])
+		return false;
+
+	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+	if (ACPI_FAILURE(status)) {
+		nv_debug(mxm, "WMMX MXMS returned %d\n", status);
+		return false;
+	}
+
+	obj = retn.pointer;
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		mxm->mxms = kmemdup(obj->buffer.pointer,
+					 obj->buffer.length, GFP_KERNEL);
+	}
+
+	kfree(obj);
+	return mxm->mxms != NULL;
+}
+#endif
+
+static struct mxm_shadow_h {
+	const char *name;
+	bool (*exec)(struct nouveau_mxm *, u8 version);
+} _mxm_shadow[] = {
+	{ "ROM", mxm_shadow_rom },
+#if defined(CONFIG_ACPI)
+	{ "DSM", mxm_shadow_dsm },
+#endif
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+	{ "WMI", mxm_shadow_wmi },
+#endif
+	{}
+};
+
+static int
+mxm_shadow(struct nouveau_mxm *mxm, u8 version)
+{
+	struct mxm_shadow_h *shadow = _mxm_shadow;
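+	/* try each available shadow method in turn until one produces a
+	 * structurally valid MXMS image; discard anything that fails
+	 * validation
+	 */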
+	do {
+		nv_debug(mxm, "checking %s\n", shadow->name);
+		if (shadow->exec(mxm, version)) {
+			if (mxms_valid(mxm))
+				return 0;
+			kfree(mxm->mxms);
+			mxm->mxms = NULL;
+		}
+	} while ((++shadow)->name);
+	return -ENOENT;
+}
+
+int
+nouveau_mxm_create_(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_mxm *mxm;
+	u8  ver, len;
+	u16 data;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
+				     length, pobject);
+	mxm = *pobject;
+	if (ret)
+		return ret;
+
+	data = mxm_table(bios, &ver, &len);
+	if (!data || !(ver = nv_ro08(bios, data))) {
+		nv_info(mxm, "no VBIOS data, nothing to do\n");
+		return 0;
+	}
+
+	nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+
+	if (mxm_shadow(mxm, ver)) {
+		nv_info(mxm, "failed to locate valid SIS\n");
+#if 0
+		/* we should, perhaps, fall back to some kind of limited
+		 * mode here if the x86 vbios hasn't already done the
+		 * work for us (so we prevent loading with completely
+		 * whacked vbios tables).
+		 */
+		return -EINVAL;
+#else
+		return 0;
+#endif
+	}
+
+	nv_info(mxm, "MXMS Version %d.%d\n",
+		mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
+	mxms_foreach(mxm, 0, NULL, NULL);
+
+	if (nouveau_boolopt(device->cfgopt, "NvMXMDCB", true))
+		mxm->action |= MXM_SANITISE_DCB;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
new file mode 100644
index 000000000000..839ca1edc132
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mxm.h>
+#include "mxms.h"
+
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+
+static u8 *
+mxms_data(struct nouveau_mxm *mxm)
+{
+	return mxm->mxms;
+}
+
+u16
+mxms_version(struct nouveau_mxm *mxm)
+{
+	u8 *mxms = mxms_data(mxm);
+	u16 version = (mxms[4] << 8) | mxms[5];
+	switch (version) {
+	case 0x0200:
+	case 0x0201:
+	case 0x0300:
+		return version;
+	default:
+		break;
+	}
+
+	nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]);
+	return 0x0000;
+}
+
+u16
+mxms_headerlen(struct nouveau_mxm *mxm)
+{
+	return 8;
+}
+
+u16
+mxms_structlen(struct nouveau_mxm *mxm)
+{
+	return *(u16 *)&mxms_data(mxm)[6];
+}
+
+bool
+mxms_checksum(struct nouveau_mxm *mxm)
+{
+	u16 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
+	u8 *mxms = mxms_data(mxm), sum = 0;
+	while (size--)
+		sum += *mxms++;
+	if (sum) {
+		nv_debug(mxm, "checksum invalid\n");
+		return false;
+	}
+	return true;
+}
+
+bool
+mxms_valid(struct nouveau_mxm *mxm)
+{
+	u8 *mxms = mxms_data(mxm);
+	if (*(u32 *)mxms != 0x5f4d584d) {
+		nv_debug(mxm, "signature invalid\n");
+		return false;
+	}
+
+	if (!mxms_version(mxm) || !mxms_checksum(mxm))
+		return false;
+
+	return true;
+}
+
+bool
+mxms_foreach(struct nouveau_mxm *mxm, u8 types,
+	     bool (*exec)(struct nouveau_mxm *, u8 *, void *), void *info)
+{
+	u8 *mxms = mxms_data(mxm);
+	u8 *desc = mxms + mxms_headerlen(mxm);
+	u8 *fini = desc + mxms_structlen(mxm) - 1;
+	while (desc < fini) {
+		u8 type = desc[0] & 0x0f;
+		u8 headerlen = 0;
+		u8 recordlen = 0;
+		u8 entries = 0;
+
+		switch (type) {
+		case 0: /* Output Device Structure */
+			if (mxms_version(mxm) >= 0x0300)
+				headerlen = 8;
+			else
+				headerlen = 6;
+			break;
+		case 1: /* System Cooling Capability Structure */
+		case 2: /* Thermal Structure */
+		case 3: /* Input Power Structure */
+			headerlen = 4;
+			break;
+		case 4: /* GPIO Device Structure */
+			headerlen = 4;
+			recordlen = 2;
+			entries   = (ROM32(desc[0]) & 0x01f00000) >> 20;
+			break;
+		case 5: /* Vendor Specific Structure */
+			headerlen = 8;
+			break;
+		case 6: /* Backlight Control Structure */
+			if (mxms_version(mxm) >= 0x0300) {
+				headerlen = 4;
+				recordlen = 8;
+				entries   = (desc[1] & 0xf0) >> 4;
+			} else {
+				headerlen = 8;
+			}
+			break;
+		case 7: /* Fan Control Structure */
+			headerlen = 8;
+			recordlen = 4;
+			entries   = desc[1] & 0x07;
+			break;
+		default:
+			nv_debug(mxm, "unknown descriptor type %d\n", type);
+			return false;
+		}
+
+		if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) {
+			static const char *mxms_desc_name[] = {
+				"ODS", "SCCS", "TS", "IPS",
+				"GSD", "VSS", "BCS", "FCS",
+			};
+			u8 *dump = desc;
+			int i, j;
+
+			nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
+			for (j = headerlen - 1; j >= 0; j--)
+				printk("%02x", dump[j]);
+			printk("\n");
+			dump += headerlen;
+
+			for (i = 0; i < entries; i++, dump += recordlen) {
+				nv_debug(mxm, "      ");
+				for (j = recordlen - 1; j >= 0; j--)
+					printk("%02x", dump[j]);
+				printk("\n");
+			}
+		}
+
+		if (types & (1 << type)) {
+			if (!exec(mxm, desc, info))
+				return false;
+		}
+
+		desc += headerlen + (entries * recordlen);
+	}
+
+	return true;
+}
+
+void
+mxms_output_device(struct nouveau_mxm *mxm, u8 *pdata, struct mxms_odev *desc)
+{
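+	/* the output device descriptor is a packed little-endian bitfield;
+	 * MXMS v3.0 structures carry two extra bytes above bit 32
+	 */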
+	u64 data = ROM32(pdata[0]);
+	if (mxms_version(mxm) >= 0x0300)
+		data |= (u64)ROM16(pdata[4]) << 32;
+
+	desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
+	desc->ddc_port  = (data & 0x0000000000000f00ULL) >> 8;
+	desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
+	desc->dig_conn  = (data & 0x0000000000780000ULL) >> 19;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
new file mode 100644
index 000000000000..5e0be0c591ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
@@ -0,0 +1,22 @@
+#ifndef __NVMXM_MXMS_H__
+#define __NVMXM_MXMS_H__
+
+struct mxms_odev {
+	u8 outp_type;
+	u8 conn_type;
+	u8 ddc_port;
+	u8 dig_conn;
+};
+
+void mxms_output_device(struct nouveau_mxm *, u8 *, struct mxms_odev *);
+
+u16  mxms_version(struct nouveau_mxm *);
+u16  mxms_headerlen(struct nouveau_mxm *);
+u16  mxms_structlen(struct nouveau_mxm *);
+bool mxms_checksum(struct nouveau_mxm *);
+bool mxms_valid(struct nouveau_mxm *);
+
+bool mxms_foreach(struct nouveau_mxm *, u8,
+		  bool (*)(struct nouveau_mxm *, u8 *, void *), void *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
new file mode 100644
index 000000000000..af129c2e8113
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mxm.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/mxm.h>
+
+#include "mxms.h"
+
+struct nv50_mxm_priv {
+	struct nouveau_mxm base;
+};
+
+struct context {
+	u32 *outp;
+	struct mxms_odev desc;
+};
+
+static bool
+mxm_match_tmds_partner(struct nouveau_mxm *mxm, u8 *data, void *info)
+{
+	struct context *ctx = info;
+	struct mxms_odev desc;
+
+	mxms_output_device(mxm, data, &desc);
+	if (desc.outp_type == 2 &&
+	    desc.dig_conn == ctx->desc.dig_conn)
+		return false;
+	return true;
+}
+
+static bool
+mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
+{
+	struct nouveau_bios *bios = nouveau_bios(mxm);
+	struct context *ctx = info;
+	u64 desc = *(u64 *)data;
+
+	mxms_output_device(mxm, data, &ctx->desc);
+
+	/* match dcb encoder type to mxm-ods device type */
+	if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
+		return true;
+
+	/* digital output, have some extra stuff to match here, there's a
+	 * table in the vbios that provides a mapping from the mxm digital
+	 * connection enum values to SOR/link
+	 */
+	if ((desc & 0x00000000000000f0) >= 0x20) {
+		/* check against sor index */
+		u8 link = mxm_sor_map(bios, ctx->desc.dig_conn);
+		if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
+			return true;
+
+		/* check dcb entry has a compatible link field */
+		link = (link & 0x30) >> 4;
+		if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
+			return true;
+	}
+
+	/* mark this descriptor accounted for by setting invalid device type,
+	 * except of course some manufacturers don't follow specs properly and
+	 * we need to avoid killing off the TMDS function on DP connectors
+	 * if MXM-SIS is missing an entry for it.
+	 */
+	data[0] &= ~0xf0;
+	if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
+	    mxms_foreach(mxm, 0x01, mxm_match_tmds_partner, ctx)) {
+		data[0] |= 0x20; /* modify descriptor to match TMDS now */
+	} else {
+		data[0] |= 0xf0;
+	}
+
+	return false;
+}
+
+static int
+mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
+{
+	struct nouveau_mxm *mxm = nouveau_mxm(bios);
+	struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
+	u8 type, i2cidx, link, ver, len;
+	u8 *conn;
+
+	/* look for an output device structure that matches this dcb entry.
+	 * if one isn't found, disable it.
+	 */
+	if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
+		nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
+			idx, ctx.outp[0], ctx.outp[1]);
+		ctx.outp[0] |= 0x0000000f;
+		return 0;
+	}
+
+	/* modify the output's ddc/aux port, there's a pointer to a table
+	 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
+	 * vbios mxm table
+	 */
+	i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port);
+	if ((ctx.outp[0] & 0x0000000f) != DCB_OUTPUT_DP)
+		i2cidx = (i2cidx & 0x0f) << 4;
+	else
+		i2cidx = (i2cidx & 0xf0);
+
+	if (i2cidx != 0xf0) {
+		ctx.outp[0] &= ~0x000000f0;
+		ctx.outp[0] |= i2cidx;
+	}
+
+	/* override dcb sorconf.link, based on what mxm data says */
+	switch (ctx.desc.outp_type) {
+	case 0x00: /* Analog CRT */
+	case 0x01: /* Analog TV/HDTV */
+		break;
+	default:
+		link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30;
+		ctx.outp[1] &= ~0x00000030;
+		ctx.outp[1] |= link;
+		break;
+	}
+
+	/* we may need to fixup various other vbios tables based on what
+	 * the descriptor says the connector type should be.
+	 *
+	 * in a lot of cases, the vbios tables will claim DVI-I is possible,
+	 * and the mxm data says the connector is really HDMI.  another
+	 * common example is DP->eDP.
+	 */
+	conn  = bios->data;
+	conn += dcb_conn(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
+	type  = conn[0];
+	switch (ctx.desc.conn_type) {
+	case 0x01: /* LVDS */
+		ctx.outp[1] |= 0x00000004; /* use_power_scripts */
+		/* XXX: modify default link width in LVDS table */
+		break;
+	case 0x02: /* HDMI */
+		type = DCB_CONNECTOR_HDMI_1;
+		break;
+	case 0x03: /* DVI-D */
+		type = DCB_CONNECTOR_DVI_D;
+		break;
+	case 0x0e: /* eDP, falls through to DPint */
+		ctx.outp[1] |= 0x00010000;
+	case 0x07: /* DP internal, purpose unclear (seen on HP8670w) */
+		ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
+		type = DCB_CONNECTOR_eDP;
+		break;
+	default:
+		break;
+	}
+
+	if (mxms_version(mxm) >= 0x0300)
+		conn[0] = type;
+
+	return 0;
+}
+
+static bool
+mxm_show_unmatched(struct nouveau_mxm *mxm, u8 *data, void *info)
+{
+	u64 desc = *(u64 *)data;
+	if ((desc & 0xf0) != 0xf0)
+		nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
+	return true;
+}
+
+static void
+mxm_dcb_sanitise(struct nouveau_mxm *mxm)
+{
+	struct nouveau_bios *bios = nouveau_bios(mxm);
+	u8  ver, hdr, cnt, len;
+	u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
+	if (dcb == 0x0000 || ver != 0x40) {
+		nv_debug(mxm, "unsupported DCB version\n");
+		return;
+	}
+
+	dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
+	mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
+}
+
+static int
+nv50_mxm_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_mxm_priv *priv;
+	int ret;
+
+	ret = nouveau_mxm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	if (priv->base.action & MXM_SANITISE_DCB)
+		mxm_dcb_sanitise(&priv->base);
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_mxm_oclass = {
+	.handle = NV_SUBDEV(MXM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mxm_ctor,
+		.dtor = _nouveau_mxm_dtor,
+		.init = _nouveau_mxm_init,
+		.fini = _nouveau_mxm_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
new file mode 100644
index 000000000000..1674c74a76c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+
+#include "priv.h"
+
+int
+nouveau_therm_attr_get(struct nouveau_therm *therm,
+		       enum nouveau_therm_attr_type type)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	switch (type) {
+	case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
+		return priv->bios_fan.min_duty;
+	case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
+		return priv->bios_fan.max_duty;
+	case NOUVEAU_THERM_ATTR_FAN_MODE:
+		return priv->fan.mode;
+	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
+		return priv->bios_sensor.thrs_fan_boost.temp;
+	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
+		return priv->bios_sensor.thrs_fan_boost.hysteresis;
+	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
+		return priv->bios_sensor.thrs_down_clock.temp;
+	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
+		return priv->bios_sensor.thrs_down_clock.hysteresis;
+	case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
+		return priv->bios_sensor.thrs_critical.temp;
+	case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
+		return priv->bios_sensor.thrs_critical.hysteresis;
+	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
+		return priv->bios_sensor.thrs_shutdown.temp;
+	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
+		return priv->bios_sensor.thrs_shutdown.hysteresis;
+	}
+
+	return -EINVAL;
+}
+
+int
+nouveau_therm_attr_set(struct nouveau_therm *therm,
+		       enum nouveau_therm_attr_type type, int value)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	switch (type) {
+	case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
+		if (value < 0)
+			value = 0;
+		if (value > priv->bios_fan.max_duty)
+			value = priv->bios_fan.max_duty;
+		priv->bios_fan.min_duty = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
+		if (value < 0)
+			value = 0;
+		if (value < priv->bios_fan.min_duty)
+			value = priv->bios_fan.min_duty;
+		priv->bios_fan.max_duty = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_FAN_MODE:
+		return nouveau_therm_fan_set_mode(therm, value);
+	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
+		priv->bios_sensor.thrs_fan_boost.temp = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
+		priv->bios_sensor.thrs_fan_boost.hysteresis = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
+		priv->bios_sensor.thrs_down_clock.temp = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
+		priv->bios_sensor.thrs_down_clock.hysteresis = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
+		priv->bios_sensor.thrs_critical.temp = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
+		priv->bios_sensor.thrs_critical.hysteresis = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
+		priv->bios_sensor.thrs_shutdown.temp = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
+		priv->bios_sensor.thrs_shutdown.hysteresis = value;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int
+nouveau_therm_init(struct nouveau_object *object)
+{
+	struct nouveau_therm *therm = (void *)object;
+	struct nouveau_therm_priv *priv = (void *)therm;
+	int ret;
+
+	ret = nouveau_subdev_init(&therm->base);
+	if (ret)
+		return ret;
+
+	if (priv->fan.percent >= 0)
+		therm->fan_set(therm, priv->fan.percent);
+
+	return 0;
+}
+
+int
+nouveau_therm_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_therm *therm = (void *)object;
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	priv->fan.percent = therm->fan_get(therm);
+
+	return nouveau_subdev_fini(&therm->base, suspend);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
new file mode 100644
index 000000000000..b29237970fa0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * 	    Martin Peres
+ */
+
+#include "priv.h"
+
+#include <core/object.h>
+#include <core/device.h>
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+
+int
+nouveau_therm_fan_get(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_gpio *gpio = nouveau_gpio(therm);
+	struct dcb_gpio_func func;
+	int card_type = nv_device(therm)->card_type;
+	u32 divs, duty;
+	int ret;
+
+	if (!priv->fan.pwm_get)
+		return -ENODEV;
+
+	ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
+	if (ret == 0) {
+		ret = priv->fan.pwm_get(therm, func.line, &divs, &duty);
+		if (ret == 0 && divs) {
+			divs = max(divs, duty);
+			if (card_type <= NV_40 || (func.log[0] & 1))
+				duty = divs - duty;
+			return (duty * 100) / divs;
+		}
+
+		return gpio->get(gpio, 0, func.func, func.line) * 100;
+	}
+
+	return -ENODEV;
+}
+
+int
+nouveau_therm_fan_set(struct nouveau_therm *therm, int percent)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_gpio *gpio = nouveau_gpio(therm);
+	struct dcb_gpio_func func;
+	int card_type = nv_device(therm)->card_type;
+	u32 divs, duty;
+	int ret;
+
+	if (priv->fan.mode == FAN_CONTROL_NONE)
+		return -EINVAL;
+
+	if (!priv->fan.pwm_set)
+		return -ENODEV;
+
+	if (percent < priv->bios_fan.min_duty)
+		percent = priv->bios_fan.min_duty;
+	if (percent > priv->bios_fan.max_duty)
+		percent = priv->bios_fan.max_duty;
+
+	ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
+	if (ret == 0) {
+		divs = priv->bios_perf_fan.pwm_divisor;
+		if (priv->bios_fan.pwm_freq) {
+			divs = 1;
+			if (priv->fan.pwm_clock)
+				divs = priv->fan.pwm_clock(therm);
+			divs /= priv->bios_fan.pwm_freq;
+		}
+
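+		/* scale the requested duty percentage to the divider range,
+		 * rounding up so a non-zero request never truncates to zero
+		 */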
+		duty = ((divs * percent) + 99) / 100;
+		if (card_type <= NV_40 || (func.log[0] & 1))
+			duty = divs - duty;
+
+		ret = priv->fan.pwm_set(therm, func.line, divs, duty);
+		return ret;
+	}
+
+	return -ENODEV;
+}
+
+int
+nouveau_therm_fan_sense(struct nouveau_therm *therm)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(therm);
+	struct nouveau_gpio *gpio = nouveau_gpio(therm);
+	struct dcb_gpio_func func;
+	u32 cycles, cur, prev;
+	u64 start, end, tach;
+
+	if (gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &func))
+		return -ENODEV;
+
+	/* Time a complete rotation and extrapolate to RPM:
+	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
+	 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
+	 */
+	start = ptimer->read(ptimer);
+	prev = gpio->get(gpio, 0, func.func, func.line);
+	cycles = 0;
+	do {
+		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
+
+		cur = gpio->get(gpio, 0, func.func, func.line);
+		if (prev != cur) {
+			if (!start)
+				start = ptimer->read(ptimer);
+			cycles++;
+			prev = cur;
+		}
+	} while (cycles < 5 && ptimer->read(ptimer) - start < 250000000);
+	end = ptimer->read(ptimer);
+
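+	/* enough transitions were seen: derive an RPM figure from the
+	 * elapsed time; otherwise report 0 (the fan is stopped or spinning
+	 * too slowly to measure)
+	 */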
+	if (cycles == 5) {
+		tach = (u64)60000000000;
+		do_div(tach, (end - start));
+		return tach;
+	} else {
+		return 0;
+	}
+}
+
+int
+nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
+			   enum nouveau_therm_fan_mode mode)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	if (priv->fan.mode == mode)
+		return 0;
+
+	if (mode < FAN_CONTROL_NONE || mode >= FAN_CONTROL_NR)
+		return -EINVAL;
+
+	switch (mode) {
+	case FAN_CONTROL_NONE:
+		nv_info(therm, "switch fan to no-control mode\n");
+		break;
+	case FAN_CONTROL_MANUAL:
+		nv_info(therm, "switch fan to manual mode\n");
+		break;
+	case FAN_CONTROL_NR:
+		break;
+	}
+
+	priv->fan.mode = mode;
+	return 0;
+}
+
+int
+nouveau_therm_fan_user_get(struct nouveau_therm *therm)
+{
+	return nouveau_therm_fan_get(therm);
+}
+
+int
+nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	if (priv->fan.mode != FAN_CONTROL_MANUAL)
+		return -EINVAL;
+
+	return nouveau_therm_fan_set(therm, percent);
+}
+
+void
+nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	priv->bios_fan.pwm_freq = 0;
+	priv->bios_fan.min_duty = 0;
+	priv->bios_fan.max_duty = 100;
+}
+
+static void
+nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	if (priv->bios_fan.min_duty > 100)
+		priv->bios_fan.min_duty = 100;
+	if (priv->bios_fan.max_duty > 100)
+		priv->bios_fan.max_duty = 100;
+
+	if (priv->bios_fan.min_duty > priv->bios_fan.max_duty)
+		priv->bios_fan.min_duty = priv->bios_fan.max_duty;
+}
+
+int
+nouveau_fan_pwm_clock_dummy(struct nouveau_therm *therm)
+{
+	return 1;
+}
+
+int
+nouveau_therm_fan_ctor(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_bios *bios = nouveau_bios(therm);
+
+	nouveau_therm_fan_set_defaults(therm);
+	nvbios_perf_fan_parse(bios, &priv->bios_perf_fan);
+	if (nvbios_therm_fan_parse(bios, &priv->bios_fan))
+		nv_error(therm, "parsing the thermal table failed\n");
+	nouveau_therm_fan_safety_checks(therm);
+
+	nouveau_therm_fan_set_mode(therm, FAN_CONTROL_NONE);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
new file mode 100644
index 000000000000..e512ff0aae60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "priv.h"
+
+#include <subdev/i2c.h>
+#include <subdev/bios/extdev.h>
+
+static bool
+probe_monitoring_device(struct nouveau_i2c_port *i2c,
+			struct i2c_board_info *info)
+{
+	struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c->i2c);
+	struct i2c_client *client;
+
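+	/* load the matching hwmon driver and instantiate an i2c client,
+	 * then drop the client again if the driver's detect callback
+	 * rejects the device
+	 */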
+	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
+
+	client = i2c_new_device(&i2c->adapter, info);
+	if (!client)
+		return false;
+
+	if (!client->driver || client->driver->detect(client, info)) {
+		i2c_unregister_device(client);
+		return false;
+	}
+
+	nv_info(priv,
+		"Found an %s at address 0x%x (controlled by lm_sensors)\n",
+		info->type, info->addr);
+	priv->ic = client;
+
+	return true;
+}
+
+void
+nouveau_therm_ic_ctor(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_bios *bios = nouveau_bios(therm);
+	struct nouveau_i2c *i2c = nouveau_i2c(therm);
+	struct nvbios_extdev_func extdev_entry;
+	struct i2c_board_info info[] = {
+		{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
+		{ I2C_BOARD_INFO("w83781d", 0x2d) },
+		{ I2C_BOARD_INFO("adt7473", 0x2e) },
+		{ I2C_BOARD_INFO("adt7473", 0x2d) },
+		{ I2C_BOARD_INFO("adt7473", 0x2c) },
+		{ I2C_BOARD_INFO("f75375", 0x2e) },
+		{ I2C_BOARD_INFO("lm99", 0x4c) },
+		{ I2C_BOARD_INFO("lm90", 0x4c) },
+		{ I2C_BOARD_INFO("lm90", 0x4d) },
+		{ I2C_BOARD_INFO("adm1021", 0x18) },
+		{ I2C_BOARD_INFO("adm1021", 0x19) },
+		{ I2C_BOARD_INFO("adm1021", 0x1a) },
+		{ I2C_BOARD_INFO("adm1021", 0x29) },
+		{ I2C_BOARD_INFO("adm1021", 0x2a) },
+		{ I2C_BOARD_INFO("adm1021", 0x2b) },
+		{ I2C_BOARD_INFO("adm1021", 0x4c) },
+		{ I2C_BOARD_INFO("adm1021", 0x4d) },
+		{ I2C_BOARD_INFO("adm1021", 0x4e) },
+		{ I2C_BOARD_INFO("lm63", 0x18) },
+		{ I2C_BOARD_INFO("lm63", 0x4e) },
+		{ }
+	};
+
+	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
+		struct i2c_board_info board[] = {
+			{ I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) },
+			{ }
+		};
+
+		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+				  board, probe_monitoring_device);
+		if (priv->ic)
+			return;
+	}
+
+	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
+		struct i2c_board_info board[] = {
+			{ I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) },
+			{ }
+		};
+
+		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+				  board, probe_monitoring_device);
+		if (priv->ic)
+			return;
+	}
+
+	/* The vbios doesn't provide the address of an existing monitoring
+	 * device. Let's try our static list.
+	 */
+	i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", info,
+		      probe_monitoring_device);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
new file mode 100644
index 000000000000..fcf2cfe731d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * 	    Martin Peres
+ */
+
+#include "priv.h"
+
+static int
+nv40_sensor_setup(struct nouveau_therm *therm)
+{
+	struct nouveau_device *device = nv_device(therm);
+
+	/* enable ADC readout and disable the ALARM threshold */
+	if (device->chipset >= 0x46) {
+		nv_mask(therm, 0x15b8, 0x80000000, 0);
+		nv_wr32(therm, 0x15b0, 0x80003fff);
+		return nv_rd32(therm, 0x15b4) & 0x3fff;
+	} else {
+		nv_wr32(therm, 0x15b0, 0xff);
+		return nv_rd32(therm, 0x15b4) & 0xff;
+	}
+}
+
+static int
+nv40_temp_get(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_device *device = nv_device(therm);
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	int core_temp;
+
+	if (device->chipset >= 0x46) {
+		nv_wr32(therm, 0x15b0, 0x80003fff);
+		core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
+	} else {
+		nv_wr32(therm, 0x15b0, 0xff);
+		core_temp = nv_rd32(therm, 0x15b4) & 0xff;
+	}
+
+	/* Setup the sensor if the temperature is 0 */
+	if (core_temp == 0)
+		core_temp = nv40_sensor_setup(therm);
+
+	if (sensor->slope_div == 0)
+		sensor->slope_div = 1;
+	if (sensor->offset_den == 0)
+		sensor->offset_den = 1;
+	if (sensor->slope_mult < 1)
+		sensor->slope_mult = 1;
+
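+	/* apply the slope/offset calibration taken from the vbios sensor
+	 * table (sanitised above to avoid divisions by zero)
+	 */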
+	core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+	core_temp = core_temp + sensor->offset_num / sensor->offset_den;
+	core_temp = core_temp + sensor->offset_constant - 8;
+
+	return core_temp;
+}
+
+int
+nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+{
+	if (line == 2) {
+		u32 reg = nv_rd32(therm, 0x0010f0);
+		if (reg & 0x80000000) {
+			*duty = (reg & 0x7fff0000) >> 16;
+			*divs = (reg & 0x00007fff);
+			return 0;
+		}
+	} else
+	if (line == 9) {
+		u32 reg = nv_rd32(therm, 0x0015f4);
+		if (reg & 0x80000000) {
+			*divs = nv_rd32(therm, 0x0015f8);
+			*duty = (reg & 0x7fffffff);
+			return 0;
+		}
+	} else {
+		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		return -ENODEV;
+	}
+
+	return -EINVAL;
+}
+
+int
+nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+{
+	if (line == 2) {
+		nv_wr32(therm, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+	} else
+	if (line == 9) {
+		nv_wr32(therm, 0x0015f8, divs);
+		nv_wr32(therm, 0x0015f4, duty | 0x80000000);
+	} else {
+		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int
+nv40_therm_ctor(struct nouveau_object *parent,
+		   struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 size,
+		   struct nouveau_object **pobject)
+{
+	struct nouveau_therm_priv *priv;
+	struct nouveau_therm *therm;
+	int ret;
+
+	ret = nouveau_therm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	therm = (void *) priv;
+	if (ret)
+		return ret;
+
+	nouveau_therm_ic_ctor(therm);
+	nouveau_therm_sensor_ctor(therm);
+	nouveau_therm_fan_ctor(therm);
+
+	priv->fan.pwm_get = nv40_fan_pwm_get;
+	priv->fan.pwm_set = nv40_fan_pwm_set;
+
+	therm->temp_get = nv40_temp_get;
+	therm->fan_get = nouveau_therm_fan_user_get;
+	therm->fan_set = nouveau_therm_fan_user_set;
+	therm->fan_sense = nouveau_therm_fan_sense;
+	therm->attr_get = nouveau_therm_attr_get;
+	therm->attr_set = nouveau_therm_attr_set;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_therm_oclass = {
+	.handle = NV_SUBDEV(THERM, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_therm_ctor,
+		.dtor = _nouveau_therm_dtor,
+		.init = nouveau_therm_init,
+		.fini = nouveau_therm_fini,
+	},
+};
\ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
new file mode 100644
index 000000000000..f87a7a3eb4e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * 	    Martin Peres
+ */
+
+#include "priv.h"
+
+static int
+pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
+{
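+	/* translate the DCB GPIO line into the PWM control register and
+	 * the divider/duty register pair index for that controller
+	 */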
+	if (*line == 0x04) {
+		*ctrl = 0x00e100;
+		*line = 4;
+		*indx = 0;
+	} else
+	if (*line == 0x09) {
+		*ctrl = 0x00e100;
+		*line = 9;
+		*indx = 1;
+	} else
+	if (*line == 0x10) {
+		*ctrl = 0x00e28c;
+		*line = 0;
+		*indx = 0;
+	} else {
+		nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+int
+nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+{
+	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
+	if (ret)
+		return ret;
+
+	if (nv_rd32(therm, ctrl) & (1 << line)) {
+		*divs = nv_rd32(therm, 0x00e114 + (id * 8));
+		*duty = nv_rd32(therm, 0x00e118 + (id * 8));
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int
+nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+{
+	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
+	if (ret)
+		return ret;
+
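+	/* select PWM control for this line, then program the divider and
+	 * duty registers for its controller
+	 */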
+	nv_mask(therm, ctrl, 0x00010001 << line, 0x00000001 << line);
+	nv_wr32(therm, 0x00e114 + (id * 8), divs);
+	nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
+	return 0;
+}
+
+int
+nv50_fan_pwm_clock(struct nouveau_therm *therm)
+{
+	int chipset = nv_device(therm)->chipset;
+	int crystal = nv_device(therm)->crystal;
+	int pwm_clock;
+
+	/* determine the PWM source clock */
+	if (chipset > 0x50 && chipset < 0x94) {
+		u8 pwm_div = nv_rd32(therm, 0x410c);
+		if (nv_rd32(therm, 0xc040) & 0x800000) {
+			/* Use the HOST clock (100 MHz)
+			 * Where does this constant (2.4) come from? */
+			pwm_clock = (100000000 >> pwm_div) / 10 / 24;
+		} else {
+			/* Where does this constant (20) come from? */
+			pwm_clock = (crystal * 1000) >> pwm_div;
+			pwm_clock /= 20;
+		}
+	} else {
+		pwm_clock = (crystal * 1000) / 20;
+	}
+
+	return pwm_clock;
+}
+
+int
+nv50_temp_get(struct nouveau_therm *therm)
+{
+	return nv_rd32(therm, 0x20400);
+}
+
+static int
+nv50_therm_ctor(struct nouveau_object *parent,
+		   struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 size,
+		   struct nouveau_object **pobject)
+{
+	struct nouveau_therm_priv *priv;
+	struct nouveau_therm *therm;
+	int ret;
+
+	ret = nouveau_therm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	therm = (void *) priv;
+	if (ret)
+		return ret;
+
+	nouveau_therm_ic_ctor(therm);
+	nouveau_therm_sensor_ctor(therm);
+	nouveau_therm_fan_ctor(therm);
+
+	priv->fan.pwm_get = nv50_fan_pwm_get;
+	priv->fan.pwm_set = nv50_fan_pwm_set;
+	priv->fan.pwm_clock = nv50_fan_pwm_clock;
+
+	therm->temp_get = nv50_temp_get;
+	therm->fan_get = nouveau_therm_fan_user_get;
+	therm->fan_set = nouveau_therm_fan_user_set;
+	therm->fan_sense = nouveau_therm_fan_sense;
+	therm->attr_get = nouveau_therm_attr_get;
+	therm->attr_set = nouveau_therm_attr_set;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_therm_oclass = {
+	.handle = NV_SUBDEV(THERM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_therm_ctor,
+		.dtor = _nouveau_therm_dtor,
+		.init = nouveau_therm_init,
+		.fini = nouveau_therm_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
new file mode 100644
index 000000000000..1c3cd6abc36e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/therm.h>
+
+#include <subdev/bios/extdev.h>
+#include <subdev/bios/perf.h>
+#include <subdev/bios/therm.h>
+
+struct nouveau_therm_priv {
+	struct nouveau_therm base;
+
+	/* bios */
+	struct nvbios_therm_sensor bios_sensor;
+	struct nvbios_therm_fan bios_fan;
+	struct nvbios_perf_fan bios_perf_fan;
+
+	/* fan priv */
+	struct {
+		enum nouveau_therm_fan_mode mode;
+		int percent;
+
+		int (*pwm_get)(struct nouveau_therm *, int line, u32*, u32*);
+		int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
+		int (*pwm_clock)(struct nouveau_therm *);
+	} fan;
+
+	/* ic */
+	struct i2c_client *ic;
+};
+
+int nouveau_therm_init(struct nouveau_object *object);
+int nouveau_therm_fini(struct nouveau_object *object, bool suspend);
+int nouveau_therm_attr_get(struct nouveau_therm *therm,
+		       enum nouveau_therm_attr_type type);
+int nouveau_therm_attr_set(struct nouveau_therm *therm,
+		       enum nouveau_therm_attr_type type, int value);
+
+void nouveau_therm_ic_ctor(struct nouveau_therm *therm);
+
+int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
+
+int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
+int nouveau_therm_fan_get(struct nouveau_therm *therm);
+int nouveau_therm_fan_set(struct nouveau_therm *therm, int percent);
+int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
+int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
+int nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
+			   enum nouveau_therm_fan_mode mode);
+
+
+int nouveau_therm_fan_sense(struct nouveau_therm *therm);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
new file mode 100644
index 000000000000..204282301fb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "priv.h"
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+
+static void
+nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	priv->bios_sensor.slope_mult = 1;
+	priv->bios_sensor.slope_div = 1;
+	priv->bios_sensor.offset_num = 0;
+	priv->bios_sensor.offset_den = 1;
+	priv->bios_sensor.offset_constant = 0;
+
+	priv->bios_sensor.thrs_fan_boost.temp = 90;
+	priv->bios_sensor.thrs_fan_boost.hysteresis = 3;
+
+	priv->bios_sensor.thrs_down_clock.temp = 95;
+	priv->bios_sensor.thrs_down_clock.hysteresis = 3;
+
+	priv->bios_sensor.thrs_critical.temp = 105;
+	priv->bios_sensor.thrs_critical.hysteresis = 5;
+
+	priv->bios_sensor.thrs_shutdown.temp = 135;
+	priv->bios_sensor.thrs_shutdown.hysteresis = 5; /* not that it matters */
+}
+
+static void
+nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	if (!priv->bios_sensor.slope_div)
+		priv->bios_sensor.slope_div = 1;
+	if (!priv->bios_sensor.offset_den)
+		priv->bios_sensor.offset_den = 1;
+}
+
+int
+nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_bios *bios = nouveau_bios(therm);
+
+	nouveau_therm_temp_set_defaults(therm);
+	if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
+				      &priv->bios_sensor))
+		nv_error(therm, "nvbios_therm_sensor_parse failed\n");
+	nouveau_therm_temp_safety_checks(therm);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
new file mode 100644
index 000000000000..5d417cc9949b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "subdev/timer.h"
+
+bool
+nouveau_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(obj);
+	u64 time0;
+
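+	/* poll until the masked value matches, or until nsec nanoseconds
+	 * have passed on the PTIMER clock
+	 */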
+	time0 = ptimer->read(ptimer);
+	do {
+		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
+			if ((nv_rd32(obj, addr) & mask) == data)
+				return true;
+		} else {
+			if ((nv_ro32(obj, addr) & mask) == data)
+				return true;
+		}
+	} while (ptimer->read(ptimer) - time0 < nsec);
+
+	return false;
+}
+
+bool
+nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(obj);
+	u64 time0;
+
+	time0 = ptimer->read(ptimer);
+	do {
+		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
+			if ((nv_rd32(obj, addr) & mask) != data)
+				return true;
+		} else {
+			if ((nv_ro32(obj, addr) & mask) != data)
+				return true;
+		}
+	} while (ptimer->read(ptimer) - time0 < nsec);
+
+	return false;
+}
+
+bool
+nouveau_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(obj);
+	u64 time0;
+
+	time0 = ptimer->read(ptimer);
+	do {
+		if (func(data))
+			return true;
+	} while (ptimer->read(ptimer) - time0 < nsec);
+
+	return false;
+}
+
+void
+nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(obj);
+	ptimer->alarm(ptimer, nsec, alarm);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
new file mode 100644
index 000000000000..49976be4d73b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/timer.h>
+
+#define NV04_PTIMER_INTR_0      0x009100
+#define NV04_PTIMER_INTR_EN_0   0x009140
+#define NV04_PTIMER_NUMERATOR   0x009200
+#define NV04_PTIMER_DENOMINATOR 0x009210
+#define NV04_PTIMER_TIME_0      0x009400
+#define NV04_PTIMER_TIME_1      0x009410
+#define NV04_PTIMER_ALARM_0     0x009420
+
+struct nv04_timer_priv {
+	struct nouveau_timer base;
+	struct list_head alarms;
+	spinlock_t lock;
+};
+
+static u64
+nv04_timer_read(struct nouveau_timer *ptimer)
+{
+	struct nv04_timer_priv *priv = (void *)ptimer;
+	u32 hi, lo;
+
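+	/* re-read the high word until it is stable, so a low-word rollover
+	 * between the two reads cannot produce a torn 64-bit timestamp
+	 */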
+	do {
+		hi = nv_rd32(priv, NV04_PTIMER_TIME_1);
+		lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
+	} while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));
+
+	return ((u64)hi << 32 | lo);
+}
+
+static void
+nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
+{
+	struct nv04_timer_priv *priv = (void *)ptimer;
+	struct nouveau_alarm *alarm, *atemp;
+	unsigned long flags;
+	LIST_HEAD(exec);
+
+	/* move any due alarms off the pending list */
+	spin_lock_irqsave(&priv->lock, flags);
+	list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
+		if (alarm->timestamp <= ptimer->read(ptimer))
+			list_move_tail(&alarm->head, &exec);
+	}
+
+	/* reschedule interrupt for next alarm time */
+	if (!list_empty(&priv->alarms)) {
+		alarm = list_first_entry(&priv->alarms, typeof(*alarm), head);
+		nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp);
+		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
+	} else {
+		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* execute any pending alarm handlers */
+	list_for_each_entry_safe(alarm, atemp, &exec, head) {
+		list_del(&alarm->head);
+		alarm->func(alarm);
+	}
+}
+
+static void
+nv04_timer_alarm(struct nouveau_timer *ptimer, u32 time,
+		 struct nouveau_alarm *alarm)
+{
+	struct nv04_timer_priv *priv = (void *)ptimer;
+	struct nouveau_alarm *list;
+	unsigned long flags;
+
+	alarm->timestamp = ptimer->read(ptimer) + time;
+
+	/* append new alarm to list, in soonest-alarm-first order */
+	spin_lock_irqsave(&priv->lock, flags);
+	list_for_each_entry(list, &priv->alarms, head) {
+		if (list->timestamp > alarm->timestamp)
+			break;
+	}
+	list_add_tail(&alarm->head, &list->head);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* process pending alarms */
+	nv04_timer_alarm_trigger(ptimer);
+}
+
+static void
+nv04_timer_intr(struct nouveau_subdev *subdev)
+{
+	struct nv04_timer_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);
+
+	if (stat & 0x00000001) {
+		nv04_timer_alarm_trigger(&priv->base);
+		nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat) {
+		nv_error(priv, "unknown stat 0x%08x\n", stat);
+		nv_wr32(priv, NV04_PTIMER_INTR_0, stat);
+	}
+}
+
+static int
+nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv04_timer_priv *priv;
+	int ret;
+
+	ret = nouveau_timer_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.intr = nv04_timer_intr;
+	priv->base.read = nv04_timer_read;
+	priv->base.alarm = nv04_timer_alarm;
+
+	INIT_LIST_HEAD(&priv->alarms);
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static void
+nv04_timer_dtor(struct nouveau_object *object)
+{
+	struct nv04_timer_priv *priv = (void *)object;
+	nouveau_timer_destroy(&priv->base);
+}
+
+static int
+nv04_timer_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv04_timer_priv *priv = (void *)object;
+	u32 m = 1, f, n, d;
+	int ret;
+
+	ret = nouveau_timer_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+
+	/* determine base clock for timer source */
+#if 0 /*XXX*/
+	if (device->chipset < 0x40) {
+		n = nouveau_hw_get_clock(device, PLL_CORE);
+	} else
+#endif
+	if (device->chipset <= 0x40) {
+		/*XXX: figure this out */
+		f = -1;
+		n = 0;
+	} else {
+		f = device->crystal;
+		n = f;
+		while (n < (d * 2)) {
+			n += (n / m);
+			m++;
+		}
+
+		nv_wr32(priv, 0x009220, m - 1);
+	}
+
+	if (!n) {
+		nv_warn(priv, "unknown input clock freq\n");
+		if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
+		    !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
+			nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
+			nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
+		}
+		return 0;
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
+	}
+
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nv_debug(priv, "input frequency : %dHz\n", f);
+	nv_debug(priv, "input multiplier: %d\n", m);
+	nv_debug(priv, "numerator       : 0x%08x\n", n);
+	nv_debug(priv, "denominator     : 0x%08x\n", d);
+	nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
+
+	nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
+	nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
+	nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
+	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+	return 0;
+}
+
+static int
+nv04_timer_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_timer_priv *priv = (void *)object;
+	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+	return nouveau_timer_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv04_timer_oclass = {
+	.handle = NV_SUBDEV(TIMER, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_timer_ctor,
+		.dtor = nv04_timer_dtor,
+		.init = nv04_timer_init,
+		.fini = nv04_timer_fini,
+	}
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 4c8d13965dd1..082c11b75acb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -22,22 +22,24 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
+#include <core/gpuobj.h>
+#include <core/mm.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
 
 void
 nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 {
 	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_mm_node *r;
-	int big = vma->node->type != vm->spg_shift;
+	int big = vma->node->type != vmm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
-	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vmm->pgt_bits - bits);
 	u32 end, len;
 
 	delta = 0;
@@ -53,7 +55,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 				end = max;
 			len = end - pte;
 
-			vm->map(vma, pgt, node, pte, len, phys, delta);
+			vmm->map(vma, pgt, node, pte, len, phys, delta);
 
 			num -= len;
 			pte += len;
@@ -67,7 +69,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 		}
 	}
 
-	vm->flush(vm);
+	vmm->flush(vm);
 }
 
 void
@@ -81,13 +83,14 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 			struct nouveau_mem *mem)
 {
 	struct nouveau_vm *vm = vma->vm;
-	int big = vma->node->type != vm->spg_shift;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	int big = vma->node->type != vmm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vmm->pgt_bits - bits);
 	unsigned m, sglen;
 	u32 end, len;
 	int i;
@@ -105,7 +108,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-			vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
 			num--;
 			pte++;
 
@@ -120,7 +123,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 			for (; m < sglen; m++) {
 				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-				vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+				vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
 				num--;
 				pte++;
 				if (num == 0)
@@ -130,7 +133,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 
 	}
 finish:
-	vm->flush(vm);
+	vmm->flush(vm);
 }
 
 void
@@ -138,14 +141,15 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		  struct nouveau_mem *mem)
 {
 	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	dma_addr_t *list = mem->pages;
-	int big = vma->node->type != vm->spg_shift;
+	int big = vma->node->type != vmm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vmm->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
@@ -156,7 +160,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 			end = max;
 		len = end - pte;
 
-		vm->map_sg(vma, pgt, mem, pte, len, list);
+		vmm->map_sg(vma, pgt, mem, pte, len, list);
 
 		num  -= len;
 		pte  += len;
@@ -167,20 +171,21 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		}
 	}
 
-	vm->flush(vm);
+	vmm->flush(vm);
 }
 
 void
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
 	struct nouveau_vm *vm = vma->vm;
-	int big = vma->node->type != vm->spg_shift;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	int big = vma->node->type != vmm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vmm->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
@@ -191,7 +196,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 			end = max;
 		len = end - pte;
 
-		vm->unmap(pgt, pte, len);
+		vmm->unmap(pgt, pte, len);
 
 		num -= len;
 		pte += len;
@@ -201,7 +206,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 		}
 	}
 
-	vm->flush(vm);
+	vmm->flush(vm);
 }
 
 void
@@ -213,6 +218,7 @@ nouveau_vm_unmap(struct nouveau_vma *vma)
 static void
 nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_vm_pgt *vpgt;
 	struct nouveau_gpuobj *pgt;
@@ -227,7 +233,7 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 		vpgt->obj[big] = NULL;
 
 		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+			vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
 		mutex_unlock(&vm->mm.mutex);
@@ -239,18 +245,19 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 static int
 nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_gpuobj *pgt;
-	int big = (type != vm->spg_shift);
+	int big = (type != vmm->spg_shift);
 	u32 pgt_size;
 	int ret;
 
-	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
+	pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
 	mutex_unlock(&vm->mm.mutex);
-	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+	ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
 	mutex_lock(&vm->mm.mutex);
 	if (unlikely(ret))
@@ -266,7 +273,7 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 
 	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+		vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
 	}
 
 	return 0;
@@ -276,23 +283,26 @@ int
 nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	       u32 access, struct nouveau_vma *vma)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	u32 align = (1 << page_shift) >> 12;
 	u32 msize = size >> 12;
 	u32 fpde, lpde, pde;
 	int ret;
 
 	mutex_lock(&vm->mm.mutex);
-	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
+	ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
+			     &vma->node);
 	if (unlikely(ret != 0)) {
 		mutex_unlock(&vm->mm.mutex);
 		return ret;
 	}
 
-	fpde = (vma->node->offset >> vm->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+	fpde = (vma->node->offset >> vmm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
+
 	for (pde = fpde; pde <= lpde; pde++) {
 		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-		int big = (vma->node->type != vm->spg_shift);
+		int big = (vma->node->type != vmm->spg_shift);
 
 		if (likely(vpgt->refcount[big])) {
 			vpgt->refcount[big]++;
@@ -303,9 +313,8 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 		if (ret) {
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nouveau_mm_put(&vm->mm, vma->node);
+			nouveau_mm_free(&vm->mm, &vma->node);
 			mutex_unlock(&vm->mm.mutex);
-			vma->node = NULL;
 			return ret;
 		}
 	}
@@ -321,91 +330,67 @@ void
 nouveau_vm_put(struct nouveau_vma *vma)
 {
 	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	u32 fpde, lpde;
 
 	if (unlikely(vma->node == NULL))
 		return;
-	fpde = (vma->node->offset >> vm->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+	fpde = (vma->node->offset >> vmm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
 
 	mutex_lock(&vm->mm.mutex);
-	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
-	nouveau_mm_put(&vm->mm, vma->node);
-	vma->node = NULL;
+	nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
+	nouveau_mm_free(&vm->mm, &vma->node);
 	mutex_unlock(&vm->mm.mutex);
 }
 
 int
-nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
-	       struct nouveau_vm **pvm)
+nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+		  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
-	u32 block, pgt_bits;
 	int ret;
 
-	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
 	if (!vm)
 		return -ENOMEM;
 
-	if (dev_priv->card_type == NV_50) {
-		vm->map_pgt = nv50_vm_map_pgt;
-		vm->map = nv50_vm_map;
-		vm->map_sg = nv50_vm_map_sg;
-		vm->unmap = nv50_vm_unmap;
-		vm->flush = nv50_vm_flush;
-		vm->spg_shift = 12;
-		vm->lpg_shift = 16;
-
-		pgt_bits = 29;
-		block = (1 << pgt_bits);
-		if (length < block)
-			block = length;
-
-	} else
-	if (dev_priv->card_type >= NV_C0) {
-		vm->map_pgt = nvc0_vm_map_pgt;
-		vm->map = nvc0_vm_map;
-		vm->map_sg = nvc0_vm_map_sg;
-		vm->unmap = nvc0_vm_unmap;
-		vm->flush = nvc0_vm_flush;
-		vm->spg_shift = 12;
-		vm->lpg_shift = 17;
-		pgt_bits = 27;
-		block = 4096;
-	} else {
-		kfree(vm);
-		return -ENOSYS;
-	}
+	INIT_LIST_HEAD(&vm->pgd_list);
+	vm->vmm = vmm;
+	vm->refcount = 1;
+	vm->fpde = offset >> (vmm->pgt_bits + 12);
+	vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
 
-	vm->fpde   = offset >> pgt_bits;
-	vm->lpde   = (offset + length - 1) >> pgt_bits;
-	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+	vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
 	if (!vm->pgt) {
 		kfree(vm);
 		return -ENOMEM;
 	}
 
-	INIT_LIST_HEAD(&vm->pgd_list);
-	vm->dev = dev;
-	vm->refcount = 1;
-	vm->pgt_bits = pgt_bits - 12;
-
 	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
 			      block >> 12);
 	if (ret) {
+		kfree(vm->pgt);
 		kfree(vm);
 		return ret;
 	}
 
-	*pvm = vm;
 	return 0;
 }
 
+int
+nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+	return vmm->create(vmm, offset, length, mm_offset, pvm);
+}
+
 static int
 nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_vm_pgd *vpgd;
 	int i;
 
@@ -420,7 +405,7 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 
 	mutex_lock(&vm->mm.mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
-		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+		vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
 	mutex_unlock(&vm->mm.mutex);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
new file mode 100644
index 000000000000..6adbbc9cc361
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include "nv04.h"
+
+#define NV04_PDMA_SIZE (128 * 1024 * 1024)
+#define NV04_PDMA_PAGE (  4 * 1024)
+
+/*******************************************************************************
+ * VM map/unmap callbacks
+ ******************************************************************************/
+
+static void
+nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
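+	/* the first two words of the page table object are the DMA object
+	 * header written by the ctor, hence the 0x8 byte offset
+	 */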
+	pte = 0x00008 + (pte * 4);
+	while (cnt) {
+		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
+		u32 phys = (u32)*list++;
+		while (cnt && page--) {
+			nv_wo32(pgt, pte, phys | 3);
+			phys += NV04_PDMA_PAGE;
+			pte += 4;
+			cnt -= 1;
+		}
+	}
+}
+
+static void
+nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte = 0x00008 + (pte * 4);
+	while (cnt--) {
+		nv_wo32(pgt, pte, 0x00000000);
+		pte += 4;
+	}
+}
+
+static void
+nv04_vm_flush(struct nouveau_vm *vm)
+{
+}
+
+/*******************************************************************************
+ * VM object
+ ******************************************************************************/
+
+int
+nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
+	       struct nouveau_vm **pvm)
+{
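+	/* the nv04-class vmmgr creates its single shared VM itself in the
+	 * ctor, so external VM creation is not supported here
+	 */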
+	return -EINVAL;
+}
+
+/*******************************************************************************
+ * VMMGR subdev
+ ******************************************************************************/
+
+static int
+nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv04_vmmgr_priv *priv;
+	struct nouveau_gpuobj *dma;
+	int ret;
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
+				   "pcigart", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.create = nv04_vm_create;
+	priv->base.limit = NV04_PDMA_SIZE;
+	priv->base.dma_bits = 32;
+	priv->base.pgt_bits = 32 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 12;
+	priv->base.map_sg = nv04_vm_map_sg;
+	priv->base.unmap = nv04_vm_unmap;
+	priv->base.flush = nv04_vm_flush;
+
+	ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
+				&priv->vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL,
+				 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
+				 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
+				 &priv->vm->pgt[0].obj[0]);
+	dma = priv->vm->pgt[0].obj[0];
+	priv->vm->pgt[0].refcount[0] = 1;
+	if (ret)
+		return ret;
+
+	nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	return 0;
+}
+
+void
+nv04_vmmgr_dtor(struct nouveau_object *object)
+{
+	struct nv04_vmmgr_priv *priv = (void *)object;
+	if (priv->vm) {
+		nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
+		nouveau_vm_ref(NULL, &priv->vm, NULL);
+	}
+	if (priv->nullp) {
+		pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
+				    priv->nullp, priv->null);
+	}
+	nouveau_vmmgr_destroy(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_vmmgr_ctor,
+		.dtor = nv04_vmmgr_dtor,
+		.init = _nouveau_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
new file mode 100644
index 000000000000..ec42d4bc86a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
@@ -0,0 +1,19 @@
+#ifndef __NV04_VMMGR_PRIV__
+#define __NV04_VMMGR_PRIV__
+
+#include <subdev/vm.h>
+
+struct nv04_vmmgr_priv {
+	struct nouveau_vmmgr base;
+	struct nouveau_vm *vm;
+	dma_addr_t null;
+	void *nullp;
+};
+
+static inline struct nv04_vmmgr_priv *
+nv04_vmmgr(void *obj)
+{
+	return (void *)nouveau_vmmgr(obj);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
new file mode 100644
index 000000000000..0203e1e12caa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/option.h>
+
+#include <subdev/timer.h>
+#include <subdev/vm.h>
+
+#include "nv04.h"
+
+#define NV41_GART_SIZE (512 * 1024 * 1024)
+#define NV41_GART_PAGE (  4 * 1024)
+
+/*******************************************************************************
+ * VM map/unmap callbacks
+ ******************************************************************************/
+
+static void
+nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+	pte = pte * 4;
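+	/* each 32-bit PTE takes the page address shifted right by 7 bits;
+	 * bit 0 appears to act as the present/valid flag
+	 */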
+	while (cnt) {
+		u32 page = PAGE_SIZE / NV41_GART_PAGE;
+		u64 phys = (u64)*list++;
+		while (cnt && page--) {
+			nv_wo32(pgt, pte, (phys >> 7) | 1);
+			phys += NV41_GART_PAGE;
+			pte += 4;
+			cnt -= 1;
+		}
+	}
+}
+
+static void
+nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte = pte * 4;
+	while (cnt--) {
+		nv_wo32(pgt, pte, 0x00000000);
+		pte += 4;
+	}
+}
+
+static void
+nv41_vm_flush(struct nouveau_vm *vm)
+{
+	struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
+
+	mutex_lock(&nv_subdev(priv)->mutex);
+	nv_wr32(priv, 0x100810, 0x00000022);
+	if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
+		nv_warn(priv, "flush timeout, 0x%08x\n",
+			nv_rd32(priv, 0x100810));
+	}
+	nv_wr32(priv, 0x100810, 0x00000000);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+/*******************************************************************************
+ * VMMGR subdev
+ ******************************************************************************/
+
+static int
+nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv04_vmmgr_priv *priv;
+	int ret;
+
+	if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
+		return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
+					   data, size, pobject);
+	}
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
+				   "pciegart", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.create = nv04_vm_create;
+	priv->base.limit = NV41_GART_SIZE;
+	priv->base.dma_bits = 39;
+	priv->base.pgt_bits = 32 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 12;
+	priv->base.map_sg = nv41_vm_map_sg;
+	priv->base.unmap = nv41_vm_unmap;
+	priv->base.flush = nv41_vm_flush;
+
+	ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
+				&priv->vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL,
+				(NV41_GART_SIZE / NV41_GART_PAGE) * 4,
+				 16, NVOBJ_FLAG_ZERO_ALLOC,
+				 &priv->vm->pgt[0].obj[0]);
+	priv->vm->pgt[0].refcount[0] = 1;
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv41_vmmgr_init(struct nouveau_object *object)
+{
+	struct nv04_vmmgr_priv *priv = (void *)object;
+	struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0];
+	int ret;
+
+	ret = nouveau_vmmgr_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
+	nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
+	nv_wr32(priv, 0x100820, 0x00000000);
+	return 0;
+}
+
+struct nouveau_oclass
+nv41_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x41),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv41_vmmgr_ctor,
+		.dtor = nv04_vmmgr_dtor,
+		.init = nv41_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
new file mode 100644
index 000000000000..0ac18d05a146
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/option.h>
+
+#include <subdev/timer.h>
+#include <subdev/vm.h>
+
+#include "nv04.h"
+
+#define NV44_GART_SIZE (512 * 1024 * 1024)
+#define NV44_GART_PAGE (  4 * 1024)
+
+/*******************************************************************************
+ * VM map/unmap callbacks
+ ******************************************************************************/
+
+static void
+nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
+	     dma_addr_t *list, u32 pte, u32 cnt)
+{
+	u32 base = (pte << 2) & ~0x0000000f;
+	u32 tmp[4];
+
+	tmp[0] = nv_ro32(pgt, base + 0x0);
+	tmp[1] = nv_ro32(pgt, base + 0x4);
+	tmp[2] = nv_ro32(pgt, base + 0x8);
+	tmp[3] = nv_ro32(pgt, base + 0xc);
+
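+	/* nv44 packs four 27-bit page addresses into each 16-byte PTE group,
+	 * so a partial update must read-modify-write all four words
+	 */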
+	while (cnt--) {
+		u32 addr = list ? (*list++ >> 12) : (null >> 12);
+		switch (pte++ & 0x3) {
+		case 0:
+			tmp[0] &= ~0x07ffffff;
+			tmp[0] |= addr;
+			break;
+		case 1:
+			tmp[0] &= ~0xf8000000;
+			tmp[0] |= addr << 27;
+			tmp[1] &= ~0x003fffff;
+			tmp[1] |= addr >> 5;
+			break;
+		case 2:
+			tmp[1] &= ~0xffc00000;
+			tmp[1] |= addr << 22;
+			tmp[2] &= ~0x0001ffff;
+			tmp[2] |= addr >> 10;
+			break;
+		case 3:
+			tmp[2] &= ~0xfffe0000;
+			tmp[2] |= addr << 17;
+			tmp[3] &= ~0x00000fff;
+			tmp[3] |= addr >> 15;
+			break;
+		}
+	}
+
+	nv_wo32(pgt, base + 0x0, tmp[0]);
+	nv_wo32(pgt, base + 0x4, tmp[1]);
+	nv_wo32(pgt, base + 0x8, tmp[2]);
+	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
+}
+
+static void
+nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+	struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm;
+	u32 tmp[4];
+	int i;
+
+	if (pte & 3) {
+		u32  max = 4 - (pte & 3);
+		u32 part = (cnt > max) ? max : cnt;
+		nv44_vm_fill(pgt, priv->null, list, pte, part);
+		pte  += part;
+		list += part;
+		cnt  -= part;
+	}
+
+	while (cnt >= 4) {
+		for (i = 0; i < 4; i++)
+			tmp[i] = *list++ >> 12;
+		nv_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+		nv_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
+		cnt -= 4;
+	}
+
+	if (cnt)
+		nv44_vm_fill(pgt, priv->null, list, pte, cnt);
+}
+
+static void
+nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt);
+
+	if (pte & 3) {
+		u32  max = 4 - (pte & 3);
+		u32 part = (cnt > max) ? max : cnt;
+		nv44_vm_fill(pgt, priv->null, NULL, pte, part);
+		pte  += part;
+		cnt  -= part;
+	}
+
+	while (cnt >= 4) {
+		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		cnt -= 4;
+	}
+
+	if (cnt)
+		nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
+}
+
+static void
+nv44_vm_flush(struct nouveau_vm *vm)
+{
+	struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
+	nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
+	nv_wr32(priv, 0x100808, 0x00000020);
+	if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
+		nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
+	nv_wr32(priv, 0x100808, 0x00000000);
+}
+
+/*******************************************************************************
+ * VMMGR subdev
+ ******************************************************************************/
+
+static int
+nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv04_vmmgr_priv *priv;
+	int ret;
+
+	if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
+		return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
+					   data, size, pobject);
+	}
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
+				   "pciegart", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.create = nv04_vm_create;
+	priv->base.limit = NV44_GART_SIZE;
+	priv->base.dma_bits = 39;
+	priv->base.pgt_bits = 32 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 12;
+	priv->base.map_sg = nv44_vm_map_sg;
+	priv->base.unmap = nv44_vm_unmap;
+	priv->base.flush = nv44_vm_flush;
+
+	priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
+	if (!priv->nullp) {
+		nv_error(priv, "unable to allocate dummy pages\n");
+		return -ENOMEM;
+	}
+
+	ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
+				&priv->vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL,
+				(NV44_GART_SIZE / NV44_GART_PAGE) * 4,
+				 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
+				 &priv->vm->pgt[0].obj[0]);
+	priv->vm->pgt[0].refcount[0] = 1;
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv44_vmmgr_init(struct nouveau_object *object)
+{
+	struct nv04_vmmgr_priv *priv = (void *)object;
+	struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0];
+	u32 addr;
+	int ret;
+
+	ret = nouveau_vmmgr_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* calculate the vram address of this PRAMIN block; the object must
+	 * be allocated with 512KiB alignment and must not exceed 512KiB in
+	 * total size for this to work correctly
+	 */
+	addr  = nv_rd32(priv, 0x10020c);
+	addr -= ((gart->addr >> 19) + 1) << 19;
+
+	nv_wr32(priv, 0x100850, 0x80000000);
+	nv_wr32(priv, 0x100818, priv->null);
+	nv_wr32(priv, 0x100804, NV44_GART_SIZE);
+	nv_wr32(priv, 0x100850, 0x00008000);
+	nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
+	nv_wr32(priv, 0x100820, 0x00000000);
+	nv_wr32(priv, 0x10082c, 0x00000001);
+	nv_wr32(priv, 0x100800, addr | 0x00000010);
+	return 0;
+}
+
+struct nouveau_oclass
+nv44_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_vmmgr_ctor,
+		.dtor = nv04_vmmgr_dtor,
+		.init = nv44_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index c9fdfb48270b..e067f81c97b3 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -22,12 +22,19 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
+#include <core/device.h>
+#include <core/gpuobj.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
 
-void
+struct nv50_vmmgr_priv {
+	struct nouveau_vmmgr base;
+	spinlock_t lock;
+};
+
+static void
 nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 		struct nouveau_gpuobj *pgt[2])
 {
@@ -35,11 +42,11 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 	u32 coverage = 0;
 
 	if (pgt[0]) {
-		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+		phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
 		coverage = (pgt[0]->size >> 3) << 12;
 	} else
 	if (pgt[1]) {
-		phys = 0x00000001 | pgt[1]->vinst; /* present */
+		phys = 0x00000001 | pgt[1]->addr; /* present */
 		coverage = (pgt[1]->size >> 3) << 16;
 	}
 
@@ -69,19 +76,18 @@ vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 	return phys;
 }
 
-void
+static void
 nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
-	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
 	u32 comp = (mem->memtype & 0x180) >> 7;
 	u32 block, target;
 	int i;
 
 	/* IGPs don't have real VRAM, re-target to stolen system memory */
 	target = 0;
-	if (dev_priv->vram_sys_base) {
-		phys += dev_priv->vram_sys_base;
+	if (nouveau_fb(vma->vm->vmm)->ram.stolen) {
+		phys += nouveau_fb(vma->vm->vmm)->ram.stolen;
 		target = 3;
 	}
 
@@ -103,7 +109,7 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 		phys += block << (vma->node->type - 3);
 		cnt  -= block;
 		if (comp) {
-			u32 tag = mem->tag->start + ((delta >> 16) * comp);
+			u32 tag = mem->tag->offset + ((delta >> 16) * comp);
 			offset_h |= (tag << 17);
 			delta    += block << (vma->node->type - 3);
 		}
@@ -117,7 +123,7 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	}
 }
 
-void
+static void
 nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
@@ -131,7 +137,7 @@ nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	}
 }
 
-void
+static void
 nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	pte <<= 3;
@@ -142,36 +148,80 @@ nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 	}
 }
 
-void
+static void
 nv50_vm_flush(struct nouveau_vm *vm)
 {
-	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_engine *engine;
 	int i;
 
-	pinstmem->flush(vm->dev);
-
-	/* BAR */
-	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
-		nv50_vm_flush_engine(vm->dev, 6);
-		return;
-	}
-
-	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
-		if (atomic_read(&vm->engref[i]))
-			dev_priv->eng[i]->tlb_flush(vm->dev, i);
+	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+		if (atomic_read(&vm->engref[i])) {
+			engine = nouveau_engine(vm->vmm, i);
+			if (engine && engine->tlb_flush)
+				engine->tlb_flush(engine);
+		}
 	}
 }
 
 void
-nv50_vm_flush_engine(struct drm_device *dev, int engine)
+nv50_vm_flush_engine(struct nouveau_subdev *subdev, int engine)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->vm_lock, flags);
-	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
-	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_wr32(subdev, 0x100c80, (engine << 16) | 1);
+	if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000))
+		nv_error(subdev, "vm flush timeout: engine %d\n", engine);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int
+nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	u32 block = (1 << (vmm->pgt_bits + 12));
+	if (block > length)
+		block = length;
+
+	return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm);
 }
+
+static int
+nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv50_vmmgr_priv *priv;
+	int ret;
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.limit = 1ULL << 40;
+	priv->base.dma_bits = 40;
+	priv->base.pgt_bits  = 29 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 16;
+	priv->base.create = nv50_vm_create;
+	priv->base.map_pgt = nv50_vm_map_pgt;
+	priv->base.map = nv50_vm_map;
+	priv->base.map_sg = nv50_vm_map_sg;
+	priv->base.unmap = nv50_vm_unmap;
+	priv->base.flush = nv50_vm_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_vmmgr_ctor,
+		.dtor = _nouveau_vmmgr_dtor,
+		.init = _nouveau_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index fad338314881..30c61e6c2017 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -22,21 +22,28 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
+#include <core/device.h>
+#include <core/gpuobj.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
 
-void
+struct nvc0_vmmgr_priv {
+	struct nouveau_vmmgr base;
+	spinlock_t lock;
+};
+
+static void
 nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
 		struct nouveau_gpuobj *pgt[2])
 {
 	u32 pde[2] = { 0, 0 };
 
 	if (pgt[0])
-		pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
+		pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
 	if (pgt[1])
-		pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
+		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
 
 	nv_wo32(pgd, (index * 8) + 0, pde[0]);
 	nv_wo32(pgd, (index * 8) + 4, pde[1]);
@@ -57,7 +64,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 	return phys;
 }
 
-void
+static void
 nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
@@ -73,7 +80,7 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	}
 }
 
-void
+static void
 nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
@@ -88,7 +95,7 @@ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	}
 }
 
-void
+static void
 nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	pte <<= 3;
@@ -100,37 +107,83 @@ nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 }
 
 void
-nvc0_vm_flush(struct nouveau_vm *vm)
+nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
 {
-	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct drm_device *dev = vm->dev;
-	struct nouveau_vm_pgd *vpgd;
+	struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
 	unsigned long flags;
-	u32 engine;
 
-	engine = 1;
-	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
-		engine |= 4;
+	/* looks like maybe a "free flush slots" counter; the faster you
+	 * write to 0x100cbc, the more it decreases
+	 */
+	spin_lock_irqsave(&priv->lock, flags);
+	if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
+		nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
+			 nv_rd32(subdev, 0x100c80), type);
+	}
+
+	nv_wr32(subdev, 0x100cb8, addr >> 8);
+	nv_wr32(subdev, 0x100cbc, 0x80000000 | type);
+
+	/* wait for flush to be queued? */
+	if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
+		nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
+			 nv_rd32(subdev, 0x100c80), type);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
 
-	pinstmem->flush(vm->dev);
+static void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+	struct nouveau_vm_pgd *vpgd;
 
-	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		/* looks like maybe a "free flush slots" counter, the
-		 * faster you write to 0x100cbc to more it decreases
-		 */
-		if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
-			NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
-				 nv_rd32(dev, 0x100c80), engine);
-		}
-		nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
-		nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
-		/* wait for flush to be queued? */
-		if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
-			NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
-				 nv_rd32(dev, 0x100c80), engine);
-		}
+		nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
 	}
-	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
+
+static int
+nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
+}
+
+static int
+nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_vmmgr_priv *priv;
+	int ret;
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.limit = 1ULL << 40;
+	priv->base.dma_bits = 40;
+	priv->base.pgt_bits  = 27 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 17;
+	priv->base.create = nvc0_vm_create;
+	priv->base.map_pgt = nvc0_vm_map_pgt;
+	priv->base.map = nvc0_vm_map;
+	priv->base.map_sg = nvc0_vm_map_sg;
+	priv->base.unmap = nvc0_vm_unmap;
+	priv->base.flush = nvc0_vm_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_vmmgr_ctor,
+		.dtor = _nouveau_vmmgr_dtor,
+		.init = _nouveau_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 111d9eba7065..cc79c796afee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -21,23 +21,153 @@
  *
  */
 
-#include <drm/drmP.h>
+#include <core/object.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/class.h>
+#include <core/mm.h>
 
-#include "nouveau_drv.h"
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_gem.h"
+#include "nouveau_chan.h"
 #include "nouveau_abi16.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
+
+struct nouveau_abi16 *
+nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	mutex_lock(&cli->mutex);
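+	/* on success the cli mutex is left held; it is dropped again by
+	 * nouveau_abi16_put() once the caller is done with the state
+	 */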
+	if (!cli->abi16) {
+		struct nouveau_abi16 *abi16;
+		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
+		if (cli->abi16) {
+			INIT_LIST_HEAD(&abi16->channels);
+			abi16->client = nv_object(cli);
+
+			/* allocate device object targeting client's default
+			 * device (ie. the one that belongs to the fd it
+			 * opened)
+			 */
+			if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
+					       NVDRM_DEVICE, 0x0080,
+					       &(struct nv_device_class) {
+						.device = ~0ULL,
+					       },
+					       sizeof(struct nv_device_class),
+					       &abi16->device) == 0)
+				return cli->abi16;
+
+			kfree(cli->abi16);
+			cli->abi16 = NULL;
+		}
+
+		mutex_unlock(&cli->mutex);
+	}
+	return cli->abi16;
+}
+
+int
+nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
+{
+	struct nouveau_cli *cli = (void *)abi16->client;
+	mutex_unlock(&cli->mutex);
+	return ret;
+}
+
+u16
+nouveau_abi16_swclass(struct nouveau_drm *drm)
+{
+	switch (nv_device(drm->device)->card_type) {
+	case NV_04:
+		return 0x006e;
+	case NV_10:
+	case NV_20:
+	case NV_30:
+	case NV_40:
+		return 0x016e;
+	case NV_50:
+		return 0x506e;
+	case NV_C0:
+	case NV_D0:
+	case NV_E0:
+		return 0x906e;
+	}
+
+	return 0x0000;
+}
+
+static void
+nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
+			struct nouveau_abi16_ntfy *ntfy)
+{
+	nouveau_mm_free(&chan->heap, &ntfy->node);
+	list_del(&ntfy->head);
+	kfree(ntfy);
+}
+
+static void
+nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
+			struct nouveau_abi16_chan *chan)
+{
+	struct nouveau_abi16_ntfy *ntfy, *temp;
+
+	/* cleanup notifier state */
+	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
+		nouveau_abi16_ntfy_fini(chan, ntfy);
+	}
+
+	if (chan->ntfy) {
+		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+		drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+	}
+
+	if (chan->heap.block_size)
+		nouveau_mm_fini(&chan->heap);
+
+	/* destroy channel object, all children will be killed too */
+	if (chan->chan) {
+		abi16->handles &= ~(1 << (chan->chan->handle & 0xffff));
+		nouveau_channel_del(&chan->chan);
+	}
+
+	list_del(&chan->head);
+	kfree(chan);
+}
+
+void
+nouveau_abi16_fini(struct nouveau_abi16 *abi16)
+{
+	struct nouveau_cli *cli = (void *)abi16->client;
+	struct nouveau_abi16_chan *chan, *temp;
+
+	/* cleanup channels */
+	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+		nouveau_abi16_chan_fini(abi16, chan);
+	}
+
+	/* destroy the device object */
+	nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
+
+	kfree(cli->abi16);
+	cli->abi16 = NULL;
+}
 
 int
 nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
 	struct drm_nouveau_getparam *getparam = data;
 
 	switch (getparam->param) {
 	case NOUVEAU_GETPARAM_CHIPSET_ID:
-		getparam->value = dev_priv->chipset;
+		getparam->value = device->chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
 		getparam->value = dev->pci_vendor;
@@ -55,16 +185,16 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 			getparam->value = 2;
 		break;
 	case NOUVEAU_GETPARAM_FB_SIZE:
-		getparam->value = dev_priv->fb_available_size;
+		getparam->value = drm->gem.vram_available;
 		break;
 	case NOUVEAU_GETPARAM_AGP_SIZE:
-		getparam->value = dev_priv->gart_info.aper_size;
+		getparam->value = drm->gem.gart_available;
 		break;
 	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
 		getparam->value = 0; /* deprecated */
 		break;
 	case NOUVEAU_GETPARAM_PTIMER_TIME:
-		getparam->value = dev_priv->engine.timer.read(dev);
+		getparam->value = ptimer->read(ptimer);
 		break;
 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
 		getparam->value = 1;
@@ -76,13 +206,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		/* NV40 and NV50 versions are quite different, but register
 		 * address is the same. User is supposed to know the card
 		 * family anyway... */
-		if (dev_priv->chipset >= 0x40) {
-			getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+		if (device->chipset >= 0x40) {
+			getparam->value = nv_rd32(device, 0x001540);
 			break;
 		}
 		/* FALLTHRU */
 	default:
-		NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
+		nv_debug(device, "unknown parameter %lld\n", getparam->param);
 		return -EINVAL;
 	}
 
@@ -98,148 +228,252 @@ nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
 int
 nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_channel_alloc *init = data;
-	struct nouveau_channel *chan;
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *chan;
+	struct nouveau_client *client;
+	struct nouveau_device *device;
+	struct nouveau_instmem *imem;
+	struct nouveau_fb *pfb;
 	int ret;
 
-	if (!dev_priv->eng[NVOBJ_ENGINE_GR])
-		return -ENODEV;
+	if (unlikely(!abi16))
+		return -ENOMEM;
+	client = nv_client(abi16->client);
 
 	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
-		return -EINVAL;
+		return nouveau_abi16_put(abi16, -EINVAL);
+
+	device = nv_device(abi16->device);
+	imem   = nouveau_instmem(device);
+	pfb    = nouveau_fb(device);
+
+	/* allocate "abi16 channel" data and make up a handle for it */
+	init->channel = ffsll(~abi16->handles);
+	if (!init->channel--)
+		return nouveau_abi16_put(abi16, -ENOSPC);
+
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOMEM);
+
+	INIT_LIST_HEAD(&chan->notifiers);
+	list_add(&chan->head, &abi16->channels);
+	abi16->handles |= (1 << init->channel);
+
+	/* create channel object and initialise dma and fence management */
+	if (device->card_type >= NV_E0) {
+		init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+		init->tt_ctxdma_handle = 0;
+	}
 
-	ret = nouveau_channel_alloc(dev, &chan, file_priv,
-				    init->fb_ctxdma_handle,
-				    init->tt_ctxdma_handle);
+	ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
+				  init->channel, init->fb_ctxdma_handle,
+				  init->tt_ctxdma_handle, &chan->chan);
 	if (ret)
-		return ret;
-	init->channel  = chan->id;
-
-	if (nouveau_vram_pushbuf == 0) {
-		if (chan->dma.ib_max)
-			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
-						NOUVEAU_GEM_DOMAIN_GART;
-		else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
-			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
-		else
-			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
-	} else {
+		goto done;
+
+	if (device->card_type >= NV_50)
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+					NOUVEAU_GEM_DOMAIN_GART;
+	else
+	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
-	}
+	else
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
 
-	if (dev_priv->card_type < NV_C0) {
+	if (device->card_type < NV_C0) {
 		init->subchan[0].handle = 0x00000000;
 		init->subchan[0].grclass = 0x0000;
 		init->subchan[1].handle = NvSw;
-		init->subchan[1].grclass = NV_SW;
+		init->subchan[1].grclass = 0x506e;
 		init->nr_subchan = 2;
 	}
 
 	/* Named memory object area */
-	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
+	ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
+			      0, 0, &chan->ntfy);
+	if (ret == 0)
+		ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
+	if (ret)
+		goto done;
+
+	if (device->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
+					&chan->ntfy_vma);
+		if (ret)
+			goto done;
+	}
+
+	ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
 				    &init->notifier_handle);
+	if (ret)
+		goto done;
 
-	if (ret == 0)
-		atomic_inc(&chan->users); /* userspace reference */
-	nouveau_channel_put(&chan);
-	return ret;
+	ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+done:
+	if (ret)
+		nouveau_abi16_chan_fini(abi16, chan);
+	return nouveau_abi16_put(abi16, ret);
 }
 
+
 int
 nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_channel_free *req = data;
-	struct nouveau_channel *chan;
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *chan;
+	int ret = -ENOENT;
 
-	chan = nouveau_channel_get(file_priv, req->channel);
-	if (IS_ERR(chan))
-		return PTR_ERR(chan);
+	if (unlikely(!abi16))
+		return -ENOMEM;
 
-	list_del(&chan->list);
-	atomic_dec(&chan->users);
-	nouveau_channel_put(&chan);
-	return 0;
+	list_for_each_entry(chan, &abi16->channels, head) {
+		if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
+			nouveau_abi16_chan_fini(abi16, chan);
+			return nouveau_abi16_put(abi16, 0);
+		}
+	}
+
+	return nouveau_abi16_put(abi16, ret);
 }
 
 int
 nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_grobj_alloc *init = data;
-	struct nouveau_channel *chan;
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_object *object;
 	int ret;
 
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
 	if (init->handle == ~0)
-		return -EINVAL;
+		return nouveau_abi16_put(abi16, -EINVAL);
 
 	/* compatibility with userspace that assumes 506e for all chipsets */
 	if (init->class == 0x506e) {
-		init->class = nouveau_software_class(dev);
+		init->class = nouveau_abi16_swclass(drm);
 		if (init->class == 0x906e)
-			return 0;
-	} else
-	if (init->class == 0x906e) {
-		NV_DEBUG(dev, "906e not supported yet\n");
-		return -EINVAL;
-	}
-
-	chan = nouveau_channel_get(file_priv, init->channel);
-	if (IS_ERR(chan))
-		return PTR_ERR(chan);
-
-	if (nouveau_ramht_find(chan, init->handle)) {
-		ret = -EEXIST;
-		goto out;
-	}
-
-	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
-	if (ret) {
-		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
-			 ret, init->channel, init->handle);
+			return nouveau_abi16_put(abi16, 0);
 	}
 
-out:
-	nouveau_channel_put(&chan);
-	return ret;
+	ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
+				  init->handle, init->class, NULL, 0, &object);
+	return nouveau_abi16_put(abi16, ret);
 }
 
 int
 nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_notifierobj_alloc *na = data;
-	struct nouveau_channel *chan;
+	struct drm_nouveau_notifierobj_alloc *info = data;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *chan, *temp;
+	struct nouveau_abi16_ntfy *ntfy;
+	struct nouveau_object *object;
+	struct nv_dma_class args;
 	int ret;
 
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
 	/* completely unnecessary for these chipsets... */
-	if (unlikely(dev_priv->card_type >= NV_C0))
-		return -EINVAL;
+	if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
+		return nouveau_abi16_put(abi16, -EINVAL);
 
-	chan = nouveau_channel_get(file_priv, na->channel);
-	if (IS_ERR(chan))
-		return PTR_ERR(chan);
+	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+		if (chan->chan->handle == (NVDRM_CHAN | info->channel))
+			break;
+		chan = NULL;
+	}
 
-	ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
-				     &na->offset);
-	nouveau_channel_put(&chan);
-	return ret;
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
+
+	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
+	if (!ntfy)
+		return nouveau_abi16_put(abi16, -ENOMEM);
+
+	list_add(&ntfy->head, &chan->notifiers);
+	ntfy->handle = info->handle;
+
+	ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
+			      &ntfy->node);
+	if (ret)
+		goto done;
+
+	args.start = ntfy->node->offset;
+	args.limit = ntfy->node->offset + ntfy->node->length - 1;
+	if (device->card_type >= NV_50) {
+		args.flags  = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+		args.start += chan->ntfy_vma.offset;
+		args.limit += chan->ntfy_vma.offset;
+	} else
+	if (drm->agp.stat == ENABLED) {
+		args.flags  = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+		args.start += drm->agp.base + chan->ntfy->bo.offset;
+		args.limit += drm->agp.base + chan->ntfy->bo.offset;
+	} else {
+		args.flags  = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+		args.start += chan->ntfy->bo.offset;
+		args.limit += chan->ntfy->bo.offset;
+	}
+
+	ret = nouveau_object_new(abi16->client, chan->chan->handle,
+				 ntfy->handle, 0x003d, &args,
+				 sizeof(args), &object);
+	if (ret)
+		goto done;
+
+	info->offset = ntfy->node->offset;
+
+done:
+	if (ret)
+		nouveau_abi16_ntfy_fini(chan, ntfy);
+	return nouveau_abi16_put(abi16, ret);
 }
 
 int
 nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
 {
-	struct drm_nouveau_gpuobj_free *objfree = data;
-	struct nouveau_channel *chan;
+	struct drm_nouveau_gpuobj_free *fini = data;
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *chan, *temp;
+	struct nouveau_abi16_ntfy *ntfy;
 	int ret;
 
-	chan = nouveau_channel_get(file_priv, objfree->channel);
-	if (IS_ERR(chan))
-		return PTR_ERR(chan);
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
+	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+		if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
+			break;
+		chan = NULL;
+	}
+
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
 
-	/* Synchronize with the user channel */
-	nouveau_channel_idle(chan);
+	/* synchronize with the user channel and destroy the gpu object */
+	nouveau_channel_idle(chan->chan);
 
-	ret = nouveau_ramht_remove(chan, objfree->handle);
-	nouveau_channel_put(&chan);
-	return ret;
+	ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
+	if (ret)
+		return nouveau_abi16_put(abi16, ret);
+
+	/* cleanup extra state if this object was a notifier */
+	list_for_each_entry(ntfy, &chan->notifiers, head) {
+		if (ntfy->handle == fini->handle) {
+			nouveau_mm_free(&chan->heap, &ntfy->node);
+			list_del(&ntfy->head);
+			break;
+		}
+	}
+
+	return nouveau_abi16_put(abi16, 0);
 }
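
The notifierobj_alloc hunk above describes a slice of the per-channel notifier buffer to the hardware as an inclusive [start, limit] range: the sub-allocation's offset within chan->heap is shifted by the NV50+ VM address, by drm->agp.base plus the buffer offset on AGP, or by the buffer offset alone on PCI GART. A minimal user-space sketch of just that arithmetic, with every name invented for illustration (this is not kernel code):

#include <stdint.h>
#include <stdio.h>

struct dma_window {
	uint64_t start;
	uint64_t limit;	/* inclusive, hence the "- 1" below */
};

/* base stands in for chan->ntfy_vma.offset, agp.base + bo.offset, etc. */
static struct dma_window
ntfy_window(uint64_t base, uint32_t node_offset, uint32_t node_length)
{
	struct dma_window w;

	w.start = base + node_offset;
	w.limit = base + node_offset + node_length - 1;
	return w;
}

int main(void)
{
	/* a 0x100-byte notifier at offset 0x200 of a GART-placed buffer */
	struct dma_window w = ntfy_window(0x40000000ull, 0x200, 0x100);

	printf("start 0x%llx limit 0x%llx\n",
	       (unsigned long long)w.start, (unsigned long long)w.limit);
	return 0;
}
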
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index e6328b008a8c..90004081a501 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -3,6 +3,7 @@
 
 #define ABI16_IOCTL_ARGS                                                       \
 	struct drm_device *dev, void *data, struct drm_file *file_priv
+
 int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
 int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
 int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
@@ -11,6 +12,37 @@ int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
 int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
 int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
 
+struct nouveau_abi16_ntfy {
+	struct list_head head;
+	struct nouveau_mm_node *node;
+	u32 handle;
+};
+
+struct nouveau_abi16_chan {
+	struct list_head head;
+	struct nouveau_channel *chan;
+	struct list_head notifiers;
+	struct nouveau_bo *ntfy;
+	struct nouveau_vma ntfy_vma;
+	struct nouveau_mm  heap;
+};
+
+struct nouveau_abi16 {
+	struct nouveau_object *client;
+	struct nouveau_object *device;
+	struct list_head channels;
+	u64 handles;
+};
+
+struct nouveau_drm;
+struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
+int  nouveau_abi16_put(struct nouveau_abi16 *, int);
+void nouveau_abi16_fini(struct nouveau_abi16 *);
+u16  nouveau_abi16_swclass(struct nouveau_drm *);
+
+#define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
+#define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
+
 struct drm_nouveau_channel_alloc {
 	uint32_t     fb_ctxdma_handle;
 	uint32_t     tt_ctxdma_handle;
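
The channel list added to this header (struct nouveau_abi16 holding struct nouveau_abi16_chan entries) is what the ioctls in nouveau_abi16.c walk when they match chan->chan->handle against NVDRM_CHAN | channel. A simplified user-space sketch of that lookup, using a plain linked list and a placeholder CHAN_BASE in place of the real NVDRM_CHAN from the driver:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CHAN_BASE 0xc0000000u	/* placeholder, not the real NVDRM_CHAN */

struct abi16_chan {
	uint32_t handle;		/* CHAN_BASE | channel id */
	struct abi16_chan *next;	/* stands in for the list_head */
};

static struct abi16_chan *
find_chan(struct abi16_chan *head, uint32_t chid)
{
	for (; head; head = head->next)
		if (head->handle == (CHAN_BASE | chid))
			return head;
	return NULL;	/* the ioctls return -ENOENT in this case */
}

int main(void)
{
	struct abi16_chan c1 = { CHAN_BASE | 1, NULL };
	struct abi16_chan c0 = { CHAN_BASE | 0, &c1 };

	printf("chan 1 %sfound\n", find_chan(&c0, 1) ? "" : "not ");
	printf("chan 7 %sfound\n", find_chan(&c0, 7) ? "" : "not ");
	return 0;
}
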
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dea42bc515ec..48783e14114c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -7,15 +7,13 @@
 #include <acpi/acpi.h>
 #include <linux/mxm-wmi.h>
 
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nv50_display.h"
-#include "nouveau_connector.h"
-
 #include <linux/vga_switcheroo.h>
 
+#include <drm/drm_edid.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_acpi.h"
+
 #define NOUVEAU_DSM_LED 0x02
 #define NOUVEAU_DSM_LED_STATE 0x00
 #define NOUVEAU_DSM_LED_OFF 0x10
@@ -388,10 +386,9 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
 	return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
 }
 
-int
+void *
 nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
 {
-	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct acpi_device *acpidev;
 	acpi_handle handle;
 	int type, ret;
@@ -403,21 +400,20 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
 		type = ACPI_VIDEO_DISPLAY_LCD;
 		break;
 	default:
-		return -EINVAL;
+		return NULL;
 	}
 
 	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
 	if (!handle)
-		return -ENODEV;
+		return NULL;
 
 	ret = acpi_bus_get_device(handle, &acpidev);
 	if (ret)
-		return -ENODEV;
+		return NULL;
 
 	ret = acpi_video_get_edid(acpidev, type, -1, &edid);
 	if (ret < 0)
-		return ret;
+		return NULL;
 
-	nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
-	return 0;
+	return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
new file mode 100644
index 000000000000..08af67722b57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -0,0 +1,22 @@
+#ifndef __NOUVEAU_ACPI_H__
+#define __NOUVEAU_ACPI_H__
+
+#define ROM_BIOS_PAGE 4096
+
+#if defined(CONFIG_ACPI)
+void nouveau_register_dsm_handler(void);
+void nouveau_unregister_dsm_handler(void);
+void nouveau_switcheroo_optimus_dsm(void);
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
+#else
+static inline void nouveau_register_dsm_handler(void) {}
+static inline void nouveau_unregister_dsm_handler(void) {}
+static inline void nouveau_switcheroo_optimus_dsm(void) {}
+static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
+static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
new file mode 100644
index 000000000000..d28430cd2ba6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -0,0 +1,152 @@
+#include <linux/module.h>
+
+#include <core/device.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_agp.h"
+#include "nouveau_reg.h"
+
+#if __OS_HAS_AGP
+MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
+static int nouveau_agpmode = -1;
+module_param_named(agpmode, nouveau_agpmode, int, 0400);
+
+static unsigned long
+get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
+{
+	struct nouveau_device *device = nv_device(drm->device);
+
+	/*
+	 * Fast writes (FW) seem to be broken on nv18; they make the
+	 * card lock up randomly.
+	 */
+	if (device->chipset == 0x18)
+		mode &= ~PCI_AGP_COMMAND_FW;
+
+	/*
+	 * AGP mode set in the command line.
+	 */
+	if (nouveau_agpmode > 0) {
+		bool agpv3 = mode & 0x8;
+		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+
+		mode = (mode & ~0x7) | (rate & 0x7);
+	}
+
+	return mode;
+}
+
+static bool
+nouveau_agp_enabled(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+
+	if (!drm_pci_device_is_agp(dev) || !dev->agp)
+		return false;
+
+	if (drm->agp.stat == UNKNOWN) {
+		if (!nouveau_agpmode)
+			return false;
+		return true;
+	}
+
+	return (drm->agp.stat == ENABLED);
+}
+#endif
+
+void
+nouveau_agp_reset(struct nouveau_drm *drm)
+{
+#if __OS_HAS_AGP
+	struct nouveau_device *device = nv_device(drm->device);
+	struct drm_device *dev = drm->dev;
+	u32 save[2];
+	int ret;
+
+	if (!nouveau_agp_enabled(drm))
+		return;
+
+	/* First of all, disable fast writes; otherwise, if they're
+	 * already enabled in the AGP bridge and we disable the card's
+	 * AGP controller, we might lock ourselves out of it. */
+	if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) |
+	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
+		struct drm_agp_info info;
+		struct drm_agp_mode mode;
+
+		ret = drm_agp_info(dev, &info);
+		if (ret)
+			return;
+
+		mode.mode  = get_agp_mode(drm, info.mode);
+		mode.mode &= ~PCI_AGP_COMMAND_FW;
+
+		ret = drm_agp_enable(dev, mode);
+		if (ret)
+			return;
+	}
+
+
+	/* clear busmaster bit, and disable AGP */
+	save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
+	nv_wr32(device, NV04_PBUS_PCI_NV_19, 0);
+
+	/* reset PGRAPH, PFIFO and PTIMER */
+	save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000);
+	nv_mask(device, 0x000200, 0x00011100, save[1]);
+
+	/* and restore busmaster bit (gives the effect of resetting AGP) */
+	nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
+#endif
+}
+
+void
+nouveau_agp_init(struct nouveau_drm *drm)
+{
+#if __OS_HAS_AGP
+	struct nouveau_device *device = nv_device(drm->device);
+	struct drm_device *dev = drm->dev;
+	struct drm_agp_info info;
+	struct drm_agp_mode mode;
+	int ret;
+
+	if (!nouveau_agp_enabled(drm))
+		return;
+	drm->agp.stat = DISABLE;
+
+	ret = drm_agp_acquire(dev);
+	if (ret) {
+		nv_error(device, "unable to acquire AGP: %d\n", ret);
+		return;
+	}
+
+	ret = drm_agp_info(dev, &info);
+	if (ret) {
+		nv_error(device, "unable to get AGP info: %d\n", ret);
+		return;
+	}
+
+	/* see agp.h for the AGPSTAT_* modes available */
+	mode.mode = get_agp_mode(drm, info.mode);
+
+	ret = drm_agp_enable(dev, mode);
+	if (ret) {
+		nv_error(device, "unable to enable AGP: %d\n", ret);
+		return;
+	}
+
+	drm->agp.stat = ENABLED;
+	drm->agp.base = info.aperture_base;
+	drm->agp.size = info.aperture_size;
+#endif
+}
+
+void
+nouveau_agp_fini(struct nouveau_drm *drm)
+{
+#if __OS_HAS_AGP
+	struct drm_device *dev = drm->dev;
+	if (dev->agp && dev->agp->acquired)
+		drm_agp_release(dev);
+#endif
+}
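
The only non-obvious arithmetic in this new file is get_agp_mode(): when the bridge reports AGP 3.0 (bit 3 of the mode word), the low three rate bits encode 4x and 8x as 1 and 2, so a user-supplied agpmode of 4 or 8 is divided by four before being folded back into the mode. A standalone sketch of just that calculation (the nv18 fast-write quirk is left out, and the sample mode words are made up):

#include <stdio.h>

static unsigned long agp_mode(unsigned long mode, int agpmode)
{
	if (agpmode > 0) {
		int agpv3 = mode & 0x8;
		int rate = agpv3 ? agpmode / 4 : agpmode;

		mode = (mode & ~0x7UL) | (rate & 0x7);
	}
	return mode;
}

int main(void)
{
	/* AGP 3.0 bridge (bit 3 set), agpmode=8 requested -> rate bits 2 */
	printf("0x%lx\n", agp_mode(0x1f00020bUL, 8));
	/* AGP 2.x bridge, agpmode=4 requested -> rate bits 4 */
	printf("0x%lx\n", agp_mode(0x1f000207UL, 4));
	return 0;
}
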
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h
new file mode 100644
index 000000000000..b55c08652963
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.h
@@ -0,0 +1,10 @@
+#ifndef __NOUVEAU_AGP_H__
+#define __NOUVEAU_AGP_H__
+
+struct nouveau_drm;
+
+void nouveau_agp_reset(struct nouveau_drm *);
+void nouveau_agp_init(struct nouveau_drm *);
+void nouveau_agp_fini(struct nouveau_drm *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2036748e56b4..f65b20a375f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -33,18 +33,17 @@
 #include <linux/backlight.h>
 #include <linux/acpi.h>
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
+#include "nouveau_drm.h"
 #include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 
 static int
 nv40_get_intensity(struct backlight_device *bd)
 {
-	struct drm_device *dev = bl_get_data(bd);
-	int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
-									>> 16;
+	struct nouveau_drm *drm = bl_get_data(bd);
+	struct nouveau_device *device = nv_device(drm->device);
+	int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) &
+				   NV40_PMC_BACKLIGHT_MASK) >> 16;
 
 	return val;
 }
@@ -52,11 +51,12 @@ nv40_get_intensity(struct backlight_device *bd)
 static int
 nv40_set_intensity(struct backlight_device *bd)
 {
-	struct drm_device *dev = bl_get_data(bd);
+	struct nouveau_drm *drm = bl_get_data(bd);
+	struct nouveau_device *device = nv_device(drm->device);
 	int val = bd->props.brightness;
-	int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT);
+	int reg = nv_rd32(device, NV40_PMC_BACKLIGHT);
 
-	nv_wr32(dev, NV40_PMC_BACKLIGHT,
+	nv_wr32(device, NV40_PMC_BACKLIGHT,
 		 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
 
 	return 0;
@@ -71,23 +71,23 @@ static const struct backlight_ops nv40_bl_ops = {
 static int
 nv40_backlight_init(struct drm_connector *connector)
 {
-	struct drm_device *dev = connector->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	struct backlight_properties props;
 	struct backlight_device *bd;
 
-	if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
+	if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
 		return 0;
 
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 31;
-	bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
+	bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
 				       &nv40_bl_ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
 
-	dev_priv->backlight = bd;
+	drm->backlight = bd;
 	bd->props.brightness = nv40_get_intensity(bd);
 	backlight_update_status(bd);
 
@@ -98,12 +95,13 @@ static int
 nv50_get_intensity(struct backlight_device *bd)
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
-	struct drm_device *dev = nv_encoder->base.base.dev;
+	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int or = nv_encoder->or;
 	u32 div = 1025;
 	u32 val;
 
-	val  = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
+	val  = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
 	val &= NV50_PDISP_SOR_PWM_CTL_VAL;
 	return ((val * 100) + (div / 2)) / div;
 }
@@ -112,13 +110,14 @@ static int
 nv50_set_intensity(struct backlight_device *bd)
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
-	struct drm_device *dev = nv_encoder->base.base.dev;
+	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int or = nv_encoder->or;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
 
-	nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or),
-		     NV50_PDISP_SOR_PWM_CTL_NEW | val);
+	nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
+			NV50_PDISP_SOR_PWM_CTL_NEW | val);
 	return 0;
 }
 
@@ -132,12 +131,13 @@ static int
 nva3_get_intensity(struct backlight_device *bd)
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
-	struct drm_device *dev = nv_encoder->base.base.dev;
+	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int or = nv_encoder->or;
 	u32 div, val;
 
-	div  = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
-	val  = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
+	div  = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
+	val  = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
 	val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
 	if (div && div >= val)
 		return ((val * 100) + (div / 2)) / div;
@@ -149,16 +149,17 @@ static int
 nva3_set_intensity(struct backlight_device *bd)
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
-	struct drm_device *dev = nv_encoder->base.base.dev;
+	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int or = nv_encoder->or;
 	u32 div, val;
 
-	div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
+	div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
 	val = (bd->props.brightness * div) / 100;
 	if (div) {
-		nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), val |
-			     NV50_PDISP_SOR_PWM_CTL_NEW |
-			     NVA3_PDISP_SOR_PWM_CTL_UNK);
+		nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
+				NV50_PDISP_SOR_PWM_CTL_NEW |
+				NVA3_PDISP_SOR_PWM_CTL_UNK);
 		return 0;
 	}
 
@@ -174,26 +175,26 @@ static const struct backlight_ops nva3_bl_ops = {
 static int
 nv50_backlight_init(struct drm_connector *connector)
 {
-	struct drm_device *dev = connector->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	struct nouveau_encoder *nv_encoder;
 	struct backlight_properties props;
 	struct backlight_device *bd;
 	const struct backlight_ops *ops;
 
-	nv_encoder = find_encoder(connector, OUTPUT_LVDS);
+	nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
 	if (!nv_encoder) {
-		nv_encoder = find_encoder(connector, OUTPUT_DP);
+		nv_encoder = find_encoder(connector, DCB_OUTPUT_DP);
 		if (!nv_encoder)
 			return -ENODEV;
 	}
 
-	if (!nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+	if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
 		return 0;
 
-	if (dev_priv->chipset <= 0xa0 ||
-	    dev_priv->chipset == 0xaa ||
-	    dev_priv->chipset == 0xac)
+	if (device->chipset <= 0xa0 ||
+	    device->chipset == 0xaa ||
+	    device->chipset == 0xac)
 		ops = &nv50_bl_ops;
 	else
 		ops = &nva3_bl_ops;
@@ -206,7 +207,7 @@ nv50_backlight_init(struct drm_connector *connector)
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
 
-	dev_priv->backlight = bd;
+	drm->backlight = bd;
 	bd->props.brightness = bd->ops->get_brightness(bd);
 	backlight_update_status(bd);
 	return 0;
@@ -215,12 +216,13 @@ nv50_backlight_init(struct drm_connector *connector)
 int
 nouveau_backlight_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	struct drm_connector *connector;
 
 #ifdef CONFIG_ACPI
 	if (acpi_video_backlight_support()) {
-		NV_INFO(dev, "ACPI backlight interface available, "
+		NV_INFO(drm, "ACPI backlight interface available, "
 			     "not registering our own\n");
 		return 0;
 	}
@@ -231,7 +233,7 @@ nouveau_backlight_init(struct drm_device *dev)
 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
 			continue;
 
-		switch (dev_priv->card_type) {
+		switch (device->card_type) {
 		case NV_40:
 			return nv40_backlight_init(connector);
 		case NV_50:
@@ -248,10 +250,10 @@ nouveau_backlight_init(struct drm_device *dev)
 void
 nouveau_backlight_exit(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (dev_priv->backlight) {
-		backlight_device_unregister(dev_priv->backlight);
-		dev_priv->backlight = NULL;
+	if (drm->backlight) {
+		backlight_device_unregister(drm->backlight);
+		drm->backlight = NULL;
 	}
 }
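
The nv50/nva3 backlight hooks above convert between a 0-100 brightness percentage and a PWM duty value: brightness * div / 100 on write, and (val * 100 + div / 2) / div on read so the result rounds to the nearest percent (div is 1025 on nv50 and is read from NV50_PDISP_SOR_PWM_DIV() on nva3). A small sketch of that round trip, plain arithmetic only:

#include <stdio.h>

static unsigned int duty_from_percent(unsigned int percent, unsigned int div)
{
	return percent * div / 100;
}

static unsigned int percent_from_duty(unsigned int duty, unsigned int div)
{
	return (duty * 100 + div / 2) / div;	/* round to nearest */
}

int main(void)
{
	unsigned int div = 1025;	/* nv50 fixed divider */
	unsigned int p;

	for (p = 0; p <= 100; p += 25)
		printf("%3u%% -> duty %4u -> %3u%%\n", p,
		       duty_from_percent(p, div),
		       percent_from_duty(duty_from_percent(p, div), div));
	return 0;
}
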
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index a84290562ca7..09fdef235882 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -22,12 +22,14 @@
  * SOFTWARE.
  */
 
+#include <subdev/bios.h>
+
 #include <drm/drmP.h>
-#define NV_DEBUG_NOTRACE
-#include "nouveau_drv.h"
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_hw.h"
 #include "nouveau_encoder.h"
-#include "nouveau_gpio.h"
 
 #include <linux/io-mapping.h>
 #include <linux/firmware.h>
@@ -65,3677 +67,6 @@ static bool nv_cksum(const uint8_t *data, unsigned int length)
 	return false;
 }
 
-static int
-score_vbios(struct nvbios *bios, const bool writeable)
-{
-	if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
-		NV_TRACEWARN(bios->dev, "... BIOS signature not found\n");
-		return 0;
-	}
-
-	if (nv_cksum(bios->data, bios->data[2] * 512)) {
-		NV_TRACEWARN(bios->dev, "... BIOS checksum invalid\n");
-		/* if a ro image is somewhat bad, it's probably all rubbish */
-		return writeable ? 2 : 1;
-	}
-
-	NV_TRACE(bios->dev, "... appears to be valid\n");
-	return 3;
-}
-
-static void
-bios_shadow_prom(struct nvbios *bios)
-{
-	struct drm_device *dev = bios->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 pcireg, access;
-	u16 pcir;
-	int i;
-
-	/* enable access to rom */
-	if (dev_priv->card_type >= NV_50)
-		pcireg = 0x088050;
-	else
-		pcireg = NV_PBUS_PCI_NV_20;
-	access = nv_mask(dev, pcireg, 0x00000001, 0x00000000);
-
-	/* bail if no rom signature, with a workaround for a PROM reading
-	 * issue on some chipsets.  the first read after a period of
-	 * inactivity returns the wrong result, so retry the first header
-	 * byte a few times before giving up as a workaround
-	 */
-	i = 16;
-	do {
-		if (nv_rd08(dev, NV_PROM_OFFSET + 0) == 0x55)
-			break;
-	} while (i--);
-
-	if (!i || nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
-		goto out;
-
-	/* additional check (see note below) - read PCI record header */
-	pcir = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
-	       nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
-	if (nv_rd08(dev, NV_PROM_OFFSET + pcir + 0) != 'P' ||
-	    nv_rd08(dev, NV_PROM_OFFSET + pcir + 1) != 'C' ||
-	    nv_rd08(dev, NV_PROM_OFFSET + pcir + 2) != 'I' ||
-	    nv_rd08(dev, NV_PROM_OFFSET + pcir + 3) != 'R')
-		goto out;
-
-	/* read entire bios image to system memory */
-	bios->length = nv_rd08(dev, NV_PROM_OFFSET + 2) * 512;
-	bios->data = kmalloc(bios->length, GFP_KERNEL);
-	if (bios->data) {
-		for (i = 0; i < bios->length; i++)
-			bios->data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
-	}
-
-out:
-	/* disable access to rom */
-	nv_wr32(dev, pcireg, access);
-}
-
-static void
-bios_shadow_pramin(struct nvbios *bios)
-{
-	struct drm_device *dev = bios->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 bar0 = 0;
-	int i;
-
-	if (dev_priv->card_type >= NV_50) {
-		u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
-		if (!addr) {
-			addr  = (u64)nv_rd32(dev, 0x001700) << 16;
-			addr += 0xf0000;
-		}
-
-		bar0 = nv_mask(dev, 0x001700, 0xffffffff, addr >> 16);
-	}
-
-	/* bail if no rom signature */
-	if (nv_rd08(dev, NV_PRAMIN_OFFSET + 0) != 0x55 ||
-	    nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
-		goto out;
-
-	bios->length = nv_rd08(dev, NV_PRAMIN_OFFSET + 2) * 512;
-	bios->data = kmalloc(bios->length, GFP_KERNEL);
-	if (bios->data) {
-		for (i = 0; i < bios->length; i++)
-			bios->data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
-	}
-
-out:
-	if (dev_priv->card_type >= NV_50)
-		nv_wr32(dev, 0x001700, bar0);
-}
-
-static void
-bios_shadow_pci(struct nvbios *bios)
-{
-	struct pci_dev *pdev = bios->dev->pdev;
-	size_t length;
-
-	if (!pci_enable_rom(pdev)) {
-		void __iomem *rom = pci_map_rom(pdev, &length);
-		if (rom && length) {
-			bios->data = kmalloc(length, GFP_KERNEL);
-			if (bios->data) {
-				memcpy_fromio(bios->data, rom, length);
-				bios->length = length;
-			}
-		}
-		if (rom)
-			pci_unmap_rom(pdev, rom);
-
-		pci_disable_rom(pdev);
-	}
-}
-
-static void
-bios_shadow_acpi(struct nvbios *bios)
-{
-	struct pci_dev *pdev = bios->dev->pdev;
-	int cnt = 65536 / ROM_BIOS_PAGE;
-	int ret;
-
-	if (!nouveau_acpi_rom_supported(pdev))
-		return;
-
-	bios->data = kmalloc(cnt * ROM_BIOS_PAGE, GFP_KERNEL);
-	if (!bios->data)
-		return;
-
-	bios->length = 0;
-	while (cnt--) {
-		ret = nouveau_acpi_get_bios_chunk(bios->data, bios->length,
-						  ROM_BIOS_PAGE);
-		if (ret != ROM_BIOS_PAGE)
-			return;
-
-		bios->length += ROM_BIOS_PAGE;
-	}
-}
-
-struct methods {
-	const char desc[8];
-	void (*shadow)(struct nvbios *);
-	const bool rw;
-	int score;
-	u32 size;
-	u8 *data;
-};
-
-static bool
-bios_shadow(struct drm_device *dev)
-{
-	struct methods shadow_methods[] = {
-		{ "PRAMIN", bios_shadow_pramin, true, 0, 0, NULL },
-		{ "PROM", bios_shadow_prom, false, 0, 0, NULL },
-		{ "ACPI", bios_shadow_acpi, true, 0, 0, NULL },
-		{ "PCIROM", bios_shadow_pci, true, 0, 0, NULL },
-		{}
-	};
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct methods *mthd, *best;
-	const struct firmware *fw;
-	char fname[32];
-	int ret;
-
-	if (nouveau_vbios) {
-		/* try to match one of the built-in methods */
-		mthd = shadow_methods;
-		do {
-			if (strcasecmp(nouveau_vbios, mthd->desc))
-				continue;
-			NV_INFO(dev, "VBIOS source: %s\n", mthd->desc);
-
-			mthd->shadow(bios);
-			mthd->score = score_vbios(bios, mthd->rw);
-			if (mthd->score)
-				return true;
-		} while ((++mthd)->shadow);
-
-		/* attempt to load firmware image */
-		snprintf(fname, sizeof(fname), "nouveau/%s", nouveau_vbios);
-		ret = request_firmware(&fw, fname, &dev->pdev->dev);
-		if (ret == 0) {
-			bios->length = fw->size;
-			bios->data   = kmemdup(fw->data, fw->size, GFP_KERNEL);
-			release_firmware(fw);
-
-			NV_INFO(dev, "VBIOS image: %s\n", nouveau_vbios);
-			if (score_vbios(bios, 1))
-				return true;
-
-			kfree(bios->data);
-			bios->data = NULL;
-		}
-
-		NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
-	}
-
-	mthd = shadow_methods;
-	do {
-		NV_TRACE(dev, "Checking %s for VBIOS\n", mthd->desc);
-		mthd->shadow(bios);
-		mthd->score = score_vbios(bios, mthd->rw);
-		mthd->size = bios->length;
-		mthd->data = bios->data;
-		bios->data = NULL;
-	} while (mthd->score != 3 && (++mthd)->shadow);
-
-	mthd = shadow_methods;
-	best = mthd;
-	do {
-		if (mthd->score > best->score) {
-			kfree(best->data);
-			best = mthd;
-		}
-	} while ((++mthd)->shadow);
-
-	if (best->score) {
-		NV_TRACE(dev, "Using VBIOS from %s\n", best->desc);
-		bios->length = best->size;
-		bios->data = best->data;
-		return true;
-	}
-
-	NV_ERROR(dev, "No valid VBIOS image found\n");
-	return false;
-}
-
-struct init_tbl_entry {
-	char *name;
-	uint8_t id;
-	/* Return:
-	 *  > 0: success, length of opcode
-	 *    0: success, but abort further parsing of table (INIT_DONE etc)
-	 *  < 0: failure, table parsing will be aborted
-	 */
-	int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
-};
-
-static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
-
-#define MACRO_INDEX_SIZE	2
-#define MACRO_SIZE		8
-#define CONDITION_SIZE		12
-#define IO_FLAG_CONDITION_SIZE	9
-#define IO_CONDITION_SIZE	5
-#define MEM_INIT_SIZE		66
-
-static void still_alive(void)
-{
-#if 0
-	sync();
-	mdelay(2);
-#endif
-}
-
-static uint32_t
-munge_reg(struct nvbios *bios, uint32_t reg)
-{
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	struct dcb_entry *dcbent = bios->display.output;
-
-	if (dev_priv->card_type < NV_50)
-		return reg;
-
-	if (reg & 0x80000000) {
-		BUG_ON(bios->display.crtc < 0);
-		reg += bios->display.crtc * 0x800;
-	}
-
-	if (reg & 0x40000000) {
-		BUG_ON(!dcbent);
-
-		reg += (ffs(dcbent->or) - 1) * 0x800;
-		if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
-			reg += 0x00000080;
-	}
-
-	reg &= ~0xe0000000;
-	return reg;
-}
-
-static int
-valid_reg(struct nvbios *bios, uint32_t reg)
-{
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	struct drm_device *dev = bios->dev;
-
-	/* C51 has misaligned regs on purpose. Marvellous */
-	if (reg & 0x2 ||
-	    (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
-		NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
-
-	/* warn on C51 regs that haven't been verified accessible in tracing */
-	if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
-	    reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
-		NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
-			reg);
-
-	if (reg >= (8*1024*1024)) {
-		NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
-		return 0;
-	}
-
-	return 1;
-}
-
-static bool
-valid_idx_port(struct nvbios *bios, uint16_t port)
-{
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	struct drm_device *dev = bios->dev;
-
-	/*
-	 * If adding more ports here, the read/write functions below will need
-	 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
-	 * used for the port in question
-	 */
-	if (dev_priv->card_type < NV_50) {
-		if (port == NV_CIO_CRX__COLOR)
-			return true;
-		if (port == NV_VIO_SRX)
-			return true;
-	} else {
-		if (port == NV_CIO_CRX__COLOR)
-			return true;
-	}
-
-	NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
-		 port);
-
-	return false;
-}
-
-static bool
-valid_port(struct nvbios *bios, uint16_t port)
-{
-	struct drm_device *dev = bios->dev;
-
-	/*
-	 * If adding more ports here, the read/write functions below will need
-	 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
-	 * used for the port in question
-	 */
-	if (port == NV_VIO_VSE2)
-		return true;
-
-	NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
-
-	return false;
-}
-
-static uint32_t
-bios_rd32(struct nvbios *bios, uint32_t reg)
-{
-	uint32_t data;
-
-	reg = munge_reg(bios, reg);
-	if (!valid_reg(bios, reg))
-		return 0;
-
-	/*
-	 * C51 sometimes uses regs with bit0 set in the address. For these
-	 * cases there should exist a translation in a BIOS table to an IO
-	 * port address which the BIOS uses for accessing the reg
-	 *
-	 * These only seem to appear for the power control regs to a flat panel,
-	 * and the GPIO regs at 0x60081*.  In C51 mmio traces the normal regs
-	 * for 0x1308 and 0x1310 are used - hence the mask below.  An S3
-	 * suspend-resume mmio trace from a C51 will be required to see if this
-	 * is true for the power microcode in 0x14.., or whether the direct IO
-	 * port access method is needed
-	 */
-	if (reg & 0x1)
-		reg &= ~0x1;
-
-	data = nv_rd32(bios->dev, reg);
-
-	BIOSLOG(bios, "	Read:  Reg: 0x%08X, Data: 0x%08X\n", reg, data);
-
-	return data;
-}
-
-static void
-bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
-{
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-
-	reg = munge_reg(bios, reg);
-	if (!valid_reg(bios, reg))
-		return;
-
-	/* see note in bios_rd32 */
-	if (reg & 0x1)
-		reg &= 0xfffffffe;
-
-	LOG_OLD_VALUE(bios_rd32(bios, reg));
-	BIOSLOG(bios, "	Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
-
-	if (dev_priv->vbios.execute) {
-		still_alive();
-		nv_wr32(bios->dev, reg, data);
-	}
-}
-
-static uint8_t
-bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
-{
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	struct drm_device *dev = bios->dev;
-	uint8_t data;
-
-	if (!valid_idx_port(bios, port))
-		return 0;
-
-	if (dev_priv->card_type < NV_50) {
-		if (port == NV_VIO_SRX)
-			data = NVReadVgaSeq(dev, bios->state.crtchead, index);
-		else	/* assume NV_CIO_CRX__COLOR */
-			data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
-	} else {
-		uint32_t data32;
-
-		data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
-		data = (data32 >> ((index & 3) << 3)) & 0xff;
-	}
-
-	BIOSLOG(bios, "	Indexed IO read:  Port: 0x%04X, Index: 0x%02X, "
-		      "Head: 0x%02X, Data: 0x%02X\n",
-		port, index, bios->state.crtchead, data);
-	return data;
-}
-
-static void
-bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
-{
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	struct drm_device *dev = bios->dev;
-
-	if (!valid_idx_port(bios, port))
-		return;
-
-	/*
-	 * The current head is maintained in the nvbios member  state.crtchead.
-	 * We trap changes to CR44 and update the head variable and hence the
-	 * register set written.
-	 * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
-	 * of the write, and to head1 after the write
-	 */
-	if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
-	    data != NV_CIO_CRE_44_HEADB)
-		bios->state.crtchead = 0;
-
-	LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
-	BIOSLOG(bios, "	Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
-		      "Head: 0x%02X, Data: 0x%02X\n",
-		port, index, bios->state.crtchead, data);
-
-	if (bios->execute && dev_priv->card_type < NV_50) {
-		still_alive();
-		if (port == NV_VIO_SRX)
-			NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
-		else	/* assume NV_CIO_CRX__COLOR */
-			NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
-	} else
-	if (bios->execute) {
-		uint32_t data32, shift = (index & 3) << 3;
-
-		still_alive();
-
-		data32  = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
-		data32 &= ~(0xff << shift);
-		data32 |= (data << shift);
-		bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
-	}
-
-	if (port == NV_CIO_CRX__COLOR &&
-	    index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
-		bios->state.crtchead = 1;
-}
-
-static uint8_t
-bios_port_rd(struct nvbios *bios, uint16_t port)
-{
-	uint8_t data, head = bios->state.crtchead;
-
-	if (!valid_port(bios, port))
-		return 0;
-
-	data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
-
-	BIOSLOG(bios, "	IO read:  Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
-		port, head, data);
-
-	return data;
-}
-
-static void
-bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
-{
-	int head = bios->state.crtchead;
-
-	if (!valid_port(bios, port))
-		return;
-
-	LOG_OLD_VALUE(bios_port_rd(bios, port));
-	BIOSLOG(bios, "	IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
-		port, head, data);
-
-	if (!bios->execute)
-		return;
-
-	still_alive();
-	NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
-}
-
-static bool
-io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
-{
-	/*
-	 * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
-	 * for the CRTC index; 1 byte for the mask to apply to the value
-	 * retrieved from the CRTC; 1 byte for the shift right to apply to the
-	 * masked CRTC value; 2 bytes for the offset to the flag array, to
-	 * which the shifted value is added; 1 byte for the mask applied to the
-	 * value read from the flag array; and 1 byte for the value to compare
-	 * against the masked byte from the flag table.
-	 */
-
-	uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
-	uint16_t crtcport = ROM16(bios->data[condptr]);
-	uint8_t crtcindex = bios->data[condptr + 2];
-	uint8_t mask = bios->data[condptr + 3];
-	uint8_t shift = bios->data[condptr + 4];
-	uint16_t flagarray = ROM16(bios->data[condptr + 5]);
-	uint8_t flagarraymask = bios->data[condptr + 7];
-	uint8_t cmpval = bios->data[condptr + 8];
-	uint8_t data;
-
-	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
-		      "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
-		      "Cmpval: 0x%02X\n",
-		offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
-
-	data = bios_idxprt_rd(bios, crtcport, crtcindex);
-
-	data = bios->data[flagarray + ((data & mask) >> shift)];
-	data &= flagarraymask;
-
-	BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
-		offset, data, cmpval);
-
-	return (data == cmpval);
-}
-
-static bool
-bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
-{
-	/*
-	 * The condition table entry has 4 bytes for the address of the
-	 * register to check, 4 bytes for a mask to apply to the register and
-	 * 4 for a test comparison value
-	 */
-
-	uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
-	uint32_t reg = ROM32(bios->data[condptr]);
-	uint32_t mask = ROM32(bios->data[condptr + 4]);
-	uint32_t cmpval = ROM32(bios->data[condptr + 8]);
-	uint32_t data;
-
-	BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
-		offset, cond, reg, mask);
-
-	data = bios_rd32(bios, reg) & mask;
-
-	BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
-		offset, data, cmpval);
-
-	return (data == cmpval);
-}
-
-static bool
-io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
-{
-	/*
-	 * The IO condition entry has 2 bytes for the IO port address; 1 byte
-	 * for the index to write to io_port; 1 byte for the mask to apply to
-	 * the byte read from io_port+1; and 1 byte for the value to compare
-	 * against the masked byte.
-	 */
-
-	uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
-	uint16_t io_port = ROM16(bios->data[condptr]);
-	uint8_t port_index = bios->data[condptr + 2];
-	uint8_t mask = bios->data[condptr + 3];
-	uint8_t cmpval = bios->data[condptr + 4];
-
-	uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
-
-	BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
-		offset, data, cmpval);
-
-	return (data == cmpval);
-}
-
-static int
-nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pll_vals pll;
-	struct pll_lims pll_limits;
-	u32 ctrl, mask, coef;
-	int ret;
-
-	ret = get_pll_limits(dev, reg, &pll_limits);
-	if (ret)
-		return ret;
-
-	clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
-	if (!clk)
-		return -ERANGE;
-
-	coef = pll.N1 << 8 | pll.M1;
-	ctrl = pll.log2P << 16;
-	mask = 0x00070000;
-	if (reg == 0x004008) {
-		mask |= 0x01f80000;
-		ctrl |= (pll_limits.log2p_bias << 19);
-		ctrl |= (pll.log2P << 22);
-	}
-
-	if (!dev_priv->vbios.execute)
-		return 0;
-
-	nv_mask(dev, reg + 0, mask, ctrl);
-	nv_wr32(dev, reg + 4, coef);
-	return 0;
-}
-
-static int
-setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
-{
-	struct drm_device *dev = bios->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	/* clk in kHz */
-	struct pll_lims pll_lim;
-	struct nouveau_pll_vals pllvals;
-	int ret;
-
-	if (dev_priv->card_type >= NV_50)
-		return nv50_pll_set(dev, reg, clk);
-
-	/* high regs (such as in the mac g5 table) are not -= 4 */
-	ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
-	if (ret)
-		return ret;
-
-	clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
-	if (!clk)
-		return -ERANGE;
-
-	if (bios->execute) {
-		still_alive();
-		nouveau_hw_setpll(dev, reg, &pllvals);
-	}
-
-	return 0;
-}
-
-static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-
-	/*
-	 * For the results of this function to be correct, CR44 must have been
-	 * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
-	 * and the DCB table parsed, before the script calling the function is
-	 * run.  run_digital_op_script is example of how to do such setup
-	 */
-
-	uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
-
-	if (dcb_entry > bios->dcb.entries) {
-		NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
-				"(%02X)\n", dcb_entry);
-		dcb_entry = 0x7f;	/* unused / invalid marker */
-	}
-
-	return dcb_entry;
-}
-
-static struct nouveau_i2c_chan *
-init_i2c_device_find(struct drm_device *dev, int i2c_index)
-{
-	if (i2c_index == 0xff) {
-		struct drm_nouveau_private *dev_priv = dev->dev_private;
-		struct dcb_table *dcb = &dev_priv->vbios.dcb;
-		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
-		int idx = dcb_entry_idx_from_crtchead(dev);
-
-		i2c_index = NV_I2C_DEFAULT(0);
-		if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
-			i2c_index = NV_I2C_DEFAULT(1);
-	}
-
-	return nouveau_i2c_find(dev, i2c_index);
-}
-
-static uint32_t
-get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
-{
-	/*
-	 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
-	 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
-	 * CR58 for CR57 = 0 to index a table of offsets to the basic
-	 * 0x6808b0 address.
-	 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
-	 * CR58 for CR57 = 0 to index a table of offsets to the basic
-	 * 0x6808b0 address, and then flip the offset by 8.
-	 */
-
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	const int pramdac_offset[13] = {
-		0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
-	const uint32_t pramdac_table[4] = {
-		0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
-
-	if (mlv >= 0x80) {
-		int dcb_entry, dacoffset;
-
-		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
-		dcb_entry = dcb_entry_idx_from_crtchead(dev);
-		if (dcb_entry == 0x7f)
-			return 0;
-		dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
-		if (mlv == 0x81)
-			dacoffset ^= 8;
-		return 0x6808b0 + dacoffset;
-	} else {
-		if (mlv >= ARRAY_SIZE(pramdac_table)) {
-			NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
-									mlv);
-			return 0;
-		}
-		return pramdac_table[mlv];
-	}
-}
-
-static int
-init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
-		      struct init_exec *iexec)
-{
-	/*
-	 * INIT_IO_RESTRICT_PROG   opcode: 0x32 ('2')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): CRTC port
-	 * offset + 3  (8  bit): CRTC index
-	 * offset + 4  (8  bit): mask
-	 * offset + 5  (8  bit): shift
-	 * offset + 6  (8  bit): count
-	 * offset + 7  (32 bit): register
-	 * offset + 11 (32 bit): configuration 1
-	 * ...
-	 *
-	 * Starting at offset + 11 there are "count" 32 bit values.
-	 * To find out which value to use read index "CRTC index" on "CRTC
-	 * port", AND this value with "mask" and then bit shift right "shift"
-	 * bits.  Read the appropriate value using this index and write to
-	 * "register"
-	 */
-
-	uint16_t crtcport = ROM16(bios->data[offset + 1]);
-	uint8_t crtcindex = bios->data[offset + 3];
-	uint8_t mask = bios->data[offset + 4];
-	uint8_t shift = bios->data[offset + 5];
-	uint8_t count = bios->data[offset + 6];
-	uint32_t reg = ROM32(bios->data[offset + 7]);
-	uint8_t config;
-	uint32_t configval;
-	int len = 11 + count * 4;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
-		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
-		offset, crtcport, crtcindex, mask, shift, count, reg);
-
-	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
-	if (config > count) {
-		NV_ERROR(bios->dev,
-			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
-			 offset, config, count);
-		return len;
-	}
-
-	configval = ROM32(bios->data[offset + 11 + config * 4]);
-
-	BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
-
-	bios_wr32(bios, reg, configval);
-
-	return len;
-}
-
-static int
-init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_REPEAT   opcode: 0x33 ('3')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): count
-	 *
-	 * Execute script following this opcode up to INIT_REPEAT_END
-	 * "count" times
-	 */
-
-	uint8_t count = bios->data[offset + 1];
-	uint8_t i;
-
-	/* no iexec->execute check by design */
-
-	BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
-		offset, count);
-
-	iexec->repeat = true;
-
-	/*
-	 * count - 1, as the script block will execute once when we leave this
-	 * opcode -- this is compatible with bios behaviour as:
-	 * a) the block is always executed at least once, even if count == 0
-	 * b) the bios interpreter skips to the op following INIT_END_REPEAT,
-	 * while we don't
-	 */
-	for (i = 0; i < count - 1; i++)
-		parse_init_table(bios, offset + 2, iexec);
-
-	iexec->repeat = false;
-
-	return 2;
-}
-
-static int
-init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
-		     struct init_exec *iexec)
-{
-	/*
-	 * INIT_IO_RESTRICT_PLL   opcode: 0x34 ('4')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): CRTC port
-	 * offset + 3  (8  bit): CRTC index
-	 * offset + 4  (8  bit): mask
-	 * offset + 5  (8  bit): shift
-	 * offset + 6  (8  bit): IO flag condition index
-	 * offset + 7  (8  bit): count
-	 * offset + 8  (32 bit): register
-	 * offset + 12 (16 bit): frequency 1
-	 * ...
-	 *
-	 * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
-	 * Set PLL register "register" to coefficients for frequency n,
-	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
-	 * "mask" and shifted right by "shift".
-	 *
-	 * If "IO flag condition index" > 0, and condition met, double
-	 * frequency before setting it.
-	 */
-
-	uint16_t crtcport = ROM16(bios->data[offset + 1]);
-	uint8_t crtcindex = bios->data[offset + 3];
-	uint8_t mask = bios->data[offset + 4];
-	uint8_t shift = bios->data[offset + 5];
-	int8_t io_flag_condition_idx = bios->data[offset + 6];
-	uint8_t count = bios->data[offset + 7];
-	uint32_t reg = ROM32(bios->data[offset + 8]);
-	uint8_t config;
-	uint16_t freq;
-	int len = 12 + count * 2;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
-		      "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
-		      "Count: 0x%02X, Reg: 0x%08X\n",
-		offset, crtcport, crtcindex, mask, shift,
-		io_flag_condition_idx, count, reg);
-
-	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
-	if (config > count) {
-		NV_ERROR(bios->dev,
-			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
-			 offset, config, count);
-		return len;
-	}
-
-	freq = ROM16(bios->data[offset + 12 + config * 2]);
-
-	if (io_flag_condition_idx > 0) {
-		if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
-			BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
-				      "frequency doubled\n", offset);
-			freq *= 2;
-		} else
-			BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
-				      "frequency unchanged\n", offset);
-	}
-
-	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
-		offset, reg, config, freq);
-
-	setPLL(bios, reg, freq * 10);
-
-	return len;
-}
-
-static int
-init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_END_REPEAT   opcode: 0x36 ('6')
-	 *
-	 * offset      (8 bit): opcode
-	 *
-	 * Marks the end of the block for INIT_REPEAT to repeat
-	 */
-
-	/* no iexec->execute check by design */
-
-	/*
-	 * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
-	 * we're not in repeat mode
-	 */
-	if (iexec->repeat)
-		return 0;
-
-	return 1;
-}
-
-static int
-init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_COPY   opcode: 0x37 ('7')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): register
-	 * offset + 5  (8  bit): shift
-	 * offset + 6  (8  bit): srcmask
-	 * offset + 7  (16 bit): CRTC port
-	 * offset + 9  (8 bit): CRTC index
-	 * offset + 10  (8 bit): mask
-	 *
-	 * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
-	 * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
-	 * port
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint8_t shift = bios->data[offset + 5];
-	uint8_t srcmask = bios->data[offset + 6];
-	uint16_t crtcport = ROM16(bios->data[offset + 7]);
-	uint8_t crtcindex = bios->data[offset + 9];
-	uint8_t mask = bios->data[offset + 10];
-	uint32_t data;
-	uint8_t crtcdata;
-
-	if (!iexec->execute)
-		return 11;
-
-	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
-		      "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
-		offset, reg, shift, srcmask, crtcport, crtcindex, mask);
-
-	data = bios_rd32(bios, reg);
-
-	if (shift < 0x80)
-		data >>= shift;
-	else
-		data <<= (0x100 - shift);
-
-	data &= srcmask;
-
-	crtcdata  = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
-	crtcdata |= (uint8_t)data;
-	bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
-
-	return 11;
-}
-
-static int
-init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_NOT   opcode: 0x38 ('8')
-	 *
-	 * offset      (8  bit): opcode
-	 *
-	 * Invert the current execute / no-execute condition (i.e. "else")
-	 */
-	if (iexec->execute)
-		BIOSLOG(bios, "0x%04X: ------ Skipping following commands  ------\n", offset);
-	else
-		BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
-
-	iexec->execute = !iexec->execute;
-	return 1;
-}
-
-static int
-init_io_flag_condition(struct nvbios *bios, uint16_t offset,
-		       struct init_exec *iexec)
-{
-	/*
-	 * INIT_IO_FLAG_CONDITION   opcode: 0x39 ('9')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): condition number
-	 *
-	 * Check condition "condition number" in the IO flag condition table.
-	 * If condition not met skip subsequent opcodes until condition is
-	 * inverted (INIT_NOT), or we hit INIT_RESUME
-	 */
-
-	uint8_t cond = bios->data[offset + 1];
-
-	if (!iexec->execute)
-		return 2;
-
-	if (io_flag_condition_met(bios, offset, cond))
-		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
-	else {
-		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
-		iexec->execute = false;
-	}
-
-	return 2;
-}
-
-static int
-init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_DP_CONDITION   opcode: 0x3A ('')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): "sub" opcode
-	 * offset + 2  (8 bit): unknown
-	 *
-	 */
-
-	struct dcb_entry *dcb = bios->display.output;
-	struct drm_device *dev = bios->dev;
-	uint8_t cond = bios->data[offset + 1];
-	uint8_t *table, *entry;
-
-	BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
-
-	if (!iexec->execute)
-		return 3;
-
-	table = nouveau_dp_bios_data(dev, dcb, &entry);
-	if (!table)
-		return 3;
-
-	switch (cond) {
-	case 0:
-		entry = dcb_conn(dev, dcb->connector);
-		if (!entry || entry[0] != DCB_CONNECTOR_eDP)
-			iexec->execute = false;
-		break;
-	case 1:
-	case 2:
-		if ((table[0]  < 0x40 && !(entry[5] & cond)) ||
-		    (table[0] == 0x40 && !(entry[4] & cond)))
-			iexec->execute = false;
-		break;
-	case 5:
-	{
-		struct nouveau_i2c_chan *auxch;
-		int ret;
-
-		auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
-		if (!auxch) {
-			NV_ERROR(dev, "0x%04X: couldn't get auxch\n", offset);
-			return 3;
-		}
-
-		ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
-		if (ret) {
-			NV_ERROR(dev, "0x%04X: auxch rd fail: %d\n", offset, ret);
-			return 3;
-		}
-
-		if (!(cond & 1))
-			iexec->execute = false;
-	}
-		break;
-	default:
-		NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
-		break;
-	}
-
-	if (iexec->execute)
-		BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
-	else
-		BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
-
-	return 3;
-}
-
-static int
-init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_3B   opcode: 0x3B ('')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): crtc index
-	 *
-	 */
-
-	uint8_t or = ffs(bios->display.output->or) - 1;
-	uint8_t index = bios->data[offset + 1];
-	uint8_t data;
-
-	if (!iexec->execute)
-		return 2;
-
-	data = bios_idxprt_rd(bios, 0x3d4, index);
-	bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
-	return 2;
-}
-
-static int
-init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_3C   opcode: 0x3C ('')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): crtc index
-	 *
-	 */
-
-	uint8_t or = ffs(bios->display.output->or) - 1;
-	uint8_t index = bios->data[offset + 1];
-	uint8_t data;
-
-	if (!iexec->execute)
-		return 2;
-
-	data = bios_idxprt_rd(bios, 0x3d4, index);
-	bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
-	return 2;
-}
-
-static int
-init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
-		      struct init_exec *iexec)
-{
-	/*
-	 * INIT_INDEX_ADDRESS_LATCHED   opcode: 0x49 ('I')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): control register
-	 * offset + 5  (32 bit): data register
-	 * offset + 9  (32 bit): mask
-	 * offset + 13 (32 bit): data
-	 * offset + 17 (8  bit): count
-	 * offset + 18 (8  bit): address 1
-	 * offset + 19 (8  bit): data 1
-	 * ...
-	 *
-	 * For each of "count" address and data pairs, write "data n" to
-	 * "data register", read the current value of "control register",
-	 * and write it back once ANDed with "mask", ORed with "data",
-	 * and ORed with "address n"
-	 */
-
-	uint32_t controlreg = ROM32(bios->data[offset + 1]);
-	uint32_t datareg = ROM32(bios->data[offset + 5]);
-	uint32_t mask = ROM32(bios->data[offset + 9]);
-	uint32_t data = ROM32(bios->data[offset + 13]);
-	uint8_t count = bios->data[offset + 17];
-	int len = 18 + count * 2;
-	uint32_t value;
-	int i;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
-		      "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
-		offset, controlreg, datareg, mask, data, count);
-
-	for (i = 0; i < count; i++) {
-		uint8_t instaddress = bios->data[offset + 18 + i * 2];
-		uint8_t instdata = bios->data[offset + 19 + i * 2];
-
-		BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
-			offset, instaddress, instdata);
-
-		bios_wr32(bios, datareg, instdata);
-		value  = bios_rd32(bios, controlreg) & mask;
-		value |= data;
-		value |= instaddress;
-		bios_wr32(bios, controlreg, value);
-	}
-
-	return len;
-}
-
-static int
-init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
-		      struct init_exec *iexec)
-{
-	/*
-	 * INIT_IO_RESTRICT_PLL2   opcode: 0x4A ('J')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): CRTC port
-	 * offset + 3  (8  bit): CRTC index
-	 * offset + 4  (8  bit): mask
-	 * offset + 5  (8  bit): shift
-	 * offset + 6  (8  bit): count
-	 * offset + 7  (32 bit): register
-	 * offset + 11 (32 bit): frequency 1
-	 * ...
-	 *
-	 * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
-	 * Set PLL register "register" to coefficients for frequency n,
-	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
-	 * "mask" and shifted right by "shift".
-	 */
-
-	uint16_t crtcport = ROM16(bios->data[offset + 1]);
-	uint8_t crtcindex = bios->data[offset + 3];
-	uint8_t mask = bios->data[offset + 4];
-	uint8_t shift = bios->data[offset + 5];
-	uint8_t count = bios->data[offset + 6];
-	uint32_t reg = ROM32(bios->data[offset + 7]);
-	int len = 11 + count * 4;
-	uint8_t config;
-	uint32_t freq;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
-		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
-		offset, crtcport, crtcindex, mask, shift, count, reg);
-
-	if (!reg)
-		return len;
-
-	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
-	if (config > count) {
-		NV_ERROR(bios->dev,
-			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
-			 offset, config, count);
-		return len;
-	}
-
-	freq = ROM32(bios->data[offset + 11 + config * 4]);
-
-	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
-		offset, reg, config, freq);
-
-	setPLL(bios, reg, freq);
-
-	return len;
-}
-
-static int
-init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_PLL2   opcode: 0x4B ('K')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): register
-	 * offset + 5  (32 bit): freq
-	 *
-	 * Set PLL register "register" to coefficients for frequency "freq"
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint32_t freq = ROM32(bios->data[offset + 5]);
-
-	if (!iexec->execute)
-		return 9;
-
-	BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
-		offset, reg, freq);
-
-	setPLL(bios, reg, freq);
-	return 9;
-}
-
-static int
-init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_I2C_BYTE   opcode: 0x4C ('L')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): DCB I2C table entry index
-	 * offset + 2  (8 bit): I2C slave address
-	 * offset + 3  (8 bit): count
-	 * offset + 4  (8 bit): I2C register 1
-	 * offset + 5  (8 bit): mask 1
-	 * offset + 6  (8 bit): data 1
-	 * ...
-	 *
-	 * For each of "count" registers given by "I2C register n" on the device
-	 * addressed by "I2C slave address" on the I2C bus given by
-	 * "DCB I2C table entry index", read the register, AND the result with
-	 * "mask n" and OR it with "data n" before writing it back to the device
-	 */
-
-	struct drm_device *dev = bios->dev;
-	uint8_t i2c_index = bios->data[offset + 1];
-	uint8_t i2c_address = bios->data[offset + 2] >> 1;
-	uint8_t count = bios->data[offset + 3];
-	struct nouveau_i2c_chan *chan;
-	int len = 4 + count * 3;
-	int ret, i;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
-		      "Count: 0x%02X\n",
-		offset, i2c_index, i2c_address, count);
-
-	chan = init_i2c_device_find(dev, i2c_index);
-	if (!chan) {
-		NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
-		return len;
-	}
-
-	for (i = 0; i < count; i++) {
-		uint8_t reg = bios->data[offset + 4 + i * 3];
-		uint8_t mask = bios->data[offset + 5 + i * 3];
-		uint8_t data = bios->data[offset + 6 + i * 3];
-		union i2c_smbus_data val;
-
-		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
-				     I2C_SMBUS_READ, reg,
-				     I2C_SMBUS_BYTE_DATA, &val);
-		if (ret < 0) {
-			NV_ERROR(dev, "0x%04X: i2c rd fail: %d\n", offset, ret);
-			return len;
-		}
-
-		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
-			      "Mask: 0x%02X, Data: 0x%02X\n",
-			offset, reg, val.byte, mask, data);
-
-		if (!bios->execute)
-			continue;
-
-		val.byte &= mask;
-		val.byte |= data;
-		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
-				     I2C_SMBUS_WRITE, reg,
-				     I2C_SMBUS_BYTE_DATA, &val);
-		if (ret < 0) {
-			NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
-			return len;
-		}
-	}
-
-	return len;
-}
-
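The ">> 1" above turns the 8-bit address byte stored in the table (address with the R/W bit in bit 0) into the 7-bit address the Linux I2C core expects, and each register then gets a mask/OR read-modify-write. A small sketch of both steps with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical bytes as they would appear in an INIT_I2C_BYTE entry. */
	uint8_t addr8 = 0xA0;		/* 8-bit address byte from the table */
	uint8_t addr7 = addr8 >> 1;	/* 0x50, what the I2C core wants     */
	uint8_t mask = 0xF0, data = 0x05;
	uint8_t current = 0x3C;		/* pretend this was read back        */

	uint8_t updated = (current & mask) | data;

	printf("7-bit address 0x%02X: 0x%02X -> 0x%02X\n",
	       addr7, current, updated);
	return 0;
}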
-static int
-init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_I2C_BYTE   opcode: 0x4D ('M')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): DCB I2C table entry index
-	 * offset + 2  (8 bit): I2C slave address
-	 * offset + 3  (8 bit): count
-	 * offset + 4  (8 bit): I2C register 1
-	 * offset + 5  (8 bit): data 1
-	 * ...
-	 *
-	 * For each of "count" registers given by "I2C register n" on the device
-	 * addressed by "I2C slave address" on the I2C bus given by
-	 * "DCB I2C table entry index", set the register to "data n"
-	 */
-
-	struct drm_device *dev = bios->dev;
-	uint8_t i2c_index = bios->data[offset + 1];
-	uint8_t i2c_address = bios->data[offset + 2] >> 1;
-	uint8_t count = bios->data[offset + 3];
-	struct nouveau_i2c_chan *chan;
-	int len = 4 + count * 2;
-	int ret, i;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
-		      "Count: 0x%02X\n",
-		offset, i2c_index, i2c_address, count);
-
-	chan = init_i2c_device_find(dev, i2c_index);
-	if (!chan) {
-		NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
-		return len;
-	}
-
-	for (i = 0; i < count; i++) {
-		uint8_t reg = bios->data[offset + 4 + i * 2];
-		union i2c_smbus_data val;
-
-		val.byte = bios->data[offset + 5 + i * 2];
-
-		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
-			offset, reg, val.byte);
-
-		if (!bios->execute)
-			continue;
-
-		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
-				     I2C_SMBUS_WRITE, reg,
-				     I2C_SMBUS_BYTE_DATA, &val);
-		if (ret < 0) {
-			NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
-			return len;
-		}
-	}
-
-	return len;
-}
-
-static int
-init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_I2C   opcode: 0x4E ('N')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): DCB I2C table entry index
-	 * offset + 2  (8 bit): I2C slave address
-	 * offset + 3  (8 bit): count
-	 * offset + 4  (8 bit): data 1
-	 * ...
-	 *
-	 * Send "count" bytes ("data n") to the device addressed by "I2C slave
-	 * address" on the I2C bus given by "DCB I2C table entry index"
-	 */
-
-	struct drm_device *dev = bios->dev;
-	uint8_t i2c_index = bios->data[offset + 1];
-	uint8_t i2c_address = bios->data[offset + 2] >> 1;
-	uint8_t count = bios->data[offset + 3];
-	int len = 4 + count;
-	struct nouveau_i2c_chan *chan;
-	struct i2c_msg msg;
-	uint8_t data[256];
-	int ret, i;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
-		      "Count: 0x%02X\n",
-		offset, i2c_index, i2c_address, count);
-
-	chan = init_i2c_device_find(dev, i2c_index);
-	if (!chan) {
-		NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
-		return len;
-	}
-
-	for (i = 0; i < count; i++) {
-		data[i] = bios->data[offset + 4 + i];
-
-		BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
-	}
-
-	if (bios->execute) {
-		msg.addr = i2c_address;
-		msg.flags = 0;
-		msg.len = count;
-		msg.buf = data;
-		ret = i2c_transfer(&chan->adapter, &msg, 1);
-		if (ret != 1) {
-			NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
-			return len;
-		}
-	}
-
-	return len;
-}
-
-static int
-init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_TMDS   opcode: 0x4F ('O')	(non-canon name)
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): magic lookup value
-	 * offset + 2  (8 bit): TMDS address
-	 * offset + 3  (8 bit): mask
-	 * offset + 4  (8 bit): data
-	 *
-	 * Read the data reg for TMDS address "TMDS address", AND it with mask
-	 * and OR it with data, then write it back
-	 * "magic lookup value" determines which TMDS base address register is
-	 * used -- see get_tmds_index_reg()
-	 */
-
-	struct drm_device *dev = bios->dev;
-	uint8_t mlv = bios->data[offset + 1];
-	uint32_t tmdsaddr = bios->data[offset + 2];
-	uint8_t mask = bios->data[offset + 3];
-	uint8_t data = bios->data[offset + 4];
-	uint32_t reg, value;
-
-	if (!iexec->execute)
-		return 5;
-
-	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
-		      "Mask: 0x%02X, Data: 0x%02X\n",
-		offset, mlv, tmdsaddr, mask, data);
-
-	reg = get_tmds_index_reg(bios->dev, mlv);
-	if (!reg) {
-		NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
-		return 5;
-	}
-
-	bios_wr32(bios, reg,
-		  tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
-	value = (bios_rd32(bios, reg + 4) & mask) | data;
-	bios_wr32(bios, reg + 4, value);
-	bios_wr32(bios, reg, tmdsaddr);
-
-	return 5;
-}
-
-static int
-init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
-		   struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_TMDS_GROUP   opcode: 0x50 ('P')	(non-canon name)
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): magic lookup value
-	 * offset + 2  (8 bit): count
-	 * offset + 3  (8 bit): addr 1
-	 * offset + 4  (8 bit): data 1
-	 * ...
-	 *
-	 * For each of "count" TMDS address and data pairs write "data n" to
-	 * "addr n".  "magic lookup value" determines which TMDS base address
-	 * register is used -- see get_tmds_index_reg()
-	 */
-
-	struct drm_device *dev = bios->dev;
-	uint8_t mlv = bios->data[offset + 1];
-	uint8_t count = bios->data[offset + 2];
-	int len = 3 + count * 2;
-	uint32_t reg;
-	int i;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
-		offset, mlv, count);
-
-	reg = get_tmds_index_reg(bios->dev, mlv);
-	if (!reg) {
-		NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
-		return len;
-	}
-
-	for (i = 0; i < count; i++) {
-		uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
-		uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
-
-		bios_wr32(bios, reg + 4, tmdsdata);
-		bios_wr32(bios, reg, tmdsaddr);
-	}
-
-	return len;
-}
-
-static int
-init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
-		      struct init_exec *iexec)
-{
-	/*
-	 * INIT_CR_INDEX_ADDRESS_LATCHED   opcode: 0x51 ('Q')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): CRTC index1
-	 * offset + 2  (8 bit): CRTC index2
-	 * offset + 3  (8 bit): baseaddr
-	 * offset + 4  (8 bit): count
-	 * offset + 5  (8 bit): data 1
-	 * ...
-	 *
-	 * For each of "count" address and data pairs, write "baseaddr + n" to
-	 * "CRTC index1" and "data n" to "CRTC index2"
-	 * Once complete, restore initial value read from "CRTC index1"
-	 */
-	uint8_t crtcindex1 = bios->data[offset + 1];
-	uint8_t crtcindex2 = bios->data[offset + 2];
-	uint8_t baseaddr = bios->data[offset + 3];
-	uint8_t count = bios->data[offset + 4];
-	int len = 5 + count;
-	uint8_t oldaddr, data;
-	int i;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
-		      "BaseAddr: 0x%02X, Count: 0x%02X\n",
-		offset, crtcindex1, crtcindex2, baseaddr, count);
-
-	oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
-
-	for (i = 0; i < count; i++) {
-		bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
-				     baseaddr + i);
-		data = bios->data[offset + 5 + i];
-		bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
-	}
-
-	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
-
-	return len;
-}
-
-static int
-init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_CR   opcode: 0x52 ('R')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (8  bit): CRTC index
-	 * offset + 2  (8  bit): mask
-	 * offset + 3  (8  bit): data
-	 *
-	 * Read the value at "CRTC index", AND it with "mask", OR it with
-	 * "data", and write the result back to "CRTC index"
-	 */
-
-	uint8_t crtcindex = bios->data[offset + 1];
-	uint8_t mask = bios->data[offset + 2];
-	uint8_t data = bios->data[offset + 3];
-	uint8_t value;
-
-	if (!iexec->execute)
-		return 4;
-
-	BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
-		offset, crtcindex, mask, data);
-
-	value  = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
-	value |= data;
-	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
-
-	return 4;
-}
-
-static int
-init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_CR   opcode: 0x53 ('S')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): CRTC index
-	 * offset + 2  (8 bit): value
-	 *
-	 * Assign "value" to CRTC register with index "CRTC index".
-	 */
-
-	uint8_t crtcindex = bios->data[offset + 1];
-	uint8_t data = bios->data[offset + 2];
-
-	if (!iexec->execute)
-		return 3;
-
-	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
-
-	return 3;
-}
-
-static int
-init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_CR_GROUP   opcode: 0x54 ('T')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): count
-	 * offset + 2  (8 bit): CRTC index 1
-	 * offset + 3  (8 bit): value 1
-	 * ...
-	 *
-	 * For each of "count" index/value pairs, assign "value n" to the CRTC
-	 * register with index "CRTC index n".
-	 */
-
-	uint8_t count = bios->data[offset + 1];
-	int len = 2 + count * 2;
-	int i;
-
-	if (!iexec->execute)
-		return len;
-
-	for (i = 0; i < count; i++)
-		init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
-
-	return len;
-}
-
-static int
-init_condition_time(struct nvbios *bios, uint16_t offset,
-		    struct init_exec *iexec)
-{
-	/*
-	 * INIT_CONDITION_TIME   opcode: 0x56 ('V')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): condition number
-	 * offset + 2  (8 bit): retries / 50
-	 *
-	 * Check condition "condition number" in the condition table.
-	 * Bios code then sleeps for 2ms if the condition is not met, and
-	 * repeats up to "retries" times, but on one C51 this has proved
-	 * insufficient.  In mmiotraces the driver sleeps for 20ms, so we do
-	 * this, and bail after "retries" times, or 2s, whichever is less.
-	 * If still not met after retries, clear execution flag for this table.
-	 */
-
-	uint8_t cond = bios->data[offset + 1];
-	uint16_t retries = bios->data[offset + 2] * 50;
-	unsigned cnt;
-
-	if (!iexec->execute)
-		return 3;
-
-	if (retries > 100)
-		retries = 100;
-
-	BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
-		offset, cond, retries);
-
-	if (!bios->execute) /* avoid 2s delays when "faking" execution */
-		retries = 1;
-
-	for (cnt = 0; cnt < retries; cnt++) {
-		if (bios_condition_met(bios, offset, cond)) {
-			BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
-								offset);
-			break;
-		} else {
-			BIOSLOG(bios, "0x%04X: "
-				"Condition not met, sleeping for 20ms\n",
-								offset);
-			mdelay(20);
-		}
-	}
-
-	if (!bios_condition_met(bios, offset, cond)) {
-		NV_WARN(bios->dev,
-			"0x%04X: Condition still not met after %dms, "
-			"skipping following opcodes\n", offset, 20 * retries);
-		iexec->execute = false;
-	}
-
-	return 3;
-}
-
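The handler above caps the retry count at 100 so the worst case is roughly two seconds of 20 ms sleeps. A toy model of that bounded-polling pattern, where sleep_20ms() and condition_met() are stand-ins and the condition is rigged to come true on the fifth check:

#include <stdbool.h>
#include <stdio.h>

static int checks;

static void sleep_20ms(void)
{
	/* A real implementation would sleep here; the sketch just counts. */
}

static bool condition_met(void)
{
	return ++checks >= 5;	/* rigged: true from the fifth check on */
}

static bool poll_condition(unsigned retries)
{
	if (retries > 100)	/* cap at 100 * 20ms, about 2 seconds */
		retries = 100;

	for (unsigned i = 0; i < retries; i++) {
		if (condition_met())
			return true;
		sleep_20ms();
	}
	return false;	/* caller would clear its execute flag here */
}

int main(void)
{
	printf("condition met: %s (after %d checks)\n",
	       poll_condition(4 * 50) ? "yes" : "no", checks);
	return 0;
}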
-static int
-init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_LTIME   opcode: 0x57 ('W')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): time
-	 *
-	 * Sleep for "time" milliseconds.
-	 */
-
-	unsigned time = ROM16(bios->data[offset + 1]);
-
-	if (!iexec->execute)
-		return 3;
-
-	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
-		offset, time);
-
-	mdelay(time);
-
-	return 3;
-}
-
-static int
-init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
-		     struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_REG_SEQUENCE   opcode: 0x58 ('X')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): base register
-	 * offset + 5  (8  bit): count
-	 * offset + 6  (32 bit): value 1
-	 * ...
-	 *
-	 * Starting at offset + 6 there are "count" 32 bit values.
-	 * For "count" iterations set "base register" + 4 * current_iteration
-	 * to "value current_iteration"
-	 */
-
-	uint32_t basereg = ROM32(bios->data[offset + 1]);
-	uint32_t count = bios->data[offset + 5];
-	int len = 6 + count * 4;
-	int i;
-
-	if (!iexec->execute)
-		return len;
-
-	BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
-		offset, basereg, count);
-
-	for (i = 0; i < count; i++) {
-		uint32_t reg = basereg + i * 4;
-		uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
-
-		bios_wr32(bios, reg, data);
-	}
-
-	return len;
-}
-
-static int
-init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_SUB_DIRECT   opcode: 0x5B ('[')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): subroutine offset (in bios)
-	 *
-	 * Calls a subroutine that will execute commands until INIT_DONE
-	 * is found.
-	 */
-
-	uint16_t sub_offset = ROM16(bios->data[offset + 1]);
-
-	if (!iexec->execute)
-		return 3;
-
-	BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
-		offset, sub_offset);
-
-	parse_init_table(bios, sub_offset, iexec);
-
-	BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
-
-	return 3;
-}
-
-static int
-init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_JUMP   opcode: 0x5C ('\')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): offset (in bios)
-	 *
-	 * Continue execution of init table from 'offset'
-	 */
-
-	uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
-
-	if (!iexec->execute)
-		return 3;
-
-	BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
-	return jmp_offset - offset;
-}
-
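Every handler returns the number of bytes the interpreter should advance (0 ends the table, negative values are errors), which is how init_jump() lands execution at jmp_offset by returning jmp_offset - offset. parse_init_table() itself is outside this hunk; a toy loop under the same convention, with opcodes invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Invented 1-byte opcodes: 0x01 = 1-byte NOP, 0x02 = 3-byte NOP,
 * 0x71 = done.  Each handler returns the number of bytes to advance. */
static int op_nop1(const uint8_t *tbl, uint16_t off) { return 1; }
static int op_nop3(const uint8_t *tbl, uint16_t off) { return 3; }
static int op_done(const uint8_t *tbl, uint16_t off) { return 0; }

static int run_table(const uint8_t *tbl, uint16_t offset)
{
	for (;;) {
		int len;

		switch (tbl[offset]) {
		case 0x01: len = op_nop1(tbl, offset); break;
		case 0x02: len = op_nop3(tbl, offset); break;
		case 0x71: len = op_done(tbl, offset); break;
		default:   return -1;	/* unknown opcode */
		}

		if (len <= 0)
			return len;	/* done, or an error from a handler  */
		offset += len;		/* a jump would return target-offset */
	}
}

int main(void)
{
	const uint8_t table[] = { 0x01, 0x02, 0xAA, 0xBB, 0x71 };

	printf("run_table() returned %d\n", run_table(table, 0));
	return 0;
}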
-static int
-init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_I2C_IF   opcode: 0x5E ('^')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): DCB I2C table entry index
-	 * offset + 2  (8 bit): I2C slave address
-	 * offset + 3  (8 bit): I2C register
-	 * offset + 4  (8 bit): mask
-	 * offset + 5  (8 bit): data
-	 *
-	 * Read the register given by "I2C register" on the device addressed
-	 * by "I2C slave address" on the I2C bus given by "DCB I2C table
-	 * entry index". Compare the result AND "mask" to "data".
-	 * If they're not equal, skip subsequent opcodes until condition is
-	 * inverted (INIT_NOT), or we hit INIT_RESUME
-	 */
-
-	uint8_t i2c_index = bios->data[offset + 1];
-	uint8_t i2c_address = bios->data[offset + 2] >> 1;
-	uint8_t reg = bios->data[offset + 3];
-	uint8_t mask = bios->data[offset + 4];
-	uint8_t data = bios->data[offset + 5];
-	struct nouveau_i2c_chan *chan;
-	union i2c_smbus_data val;
-	int ret;
-
-	/* no execute check by design */
-
-	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
-		offset, i2c_index, i2c_address);
-
-	chan = init_i2c_device_find(bios->dev, i2c_index);
-	if (!chan)
-		return -ENODEV;
-
-	ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
-			     I2C_SMBUS_READ, reg,
-			     I2C_SMBUS_BYTE_DATA, &val);
-	if (ret < 0) {
-		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: [no device], "
-			      "Mask: 0x%02X, Data: 0x%02X\n",
-			offset, reg, mask, data);
-		iexec->execute = 0;
-		return 6;
-	}
-
-	BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
-		      "Mask: 0x%02X, Data: 0x%02X\n",
-		offset, reg, val.byte, mask, data);
-
-	iexec->execute = ((val.byte & mask) == data);
-
-	return 6;
-}
-
-static int
-init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_COPY_NV_REG   opcode: 0x5F ('_')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): src reg
-	 * offset + 5  (8  bit): shift
-	 * offset + 6  (32 bit): src mask
-	 * offset + 10 (32 bit): xor
-	 * offset + 14 (32 bit): dst reg
-	 * offset + 18 (32 bit): dst mask
-	 *
-	 * Shift REGVAL("src reg") right by (signed) "shift", AND result with
-	 * "src mask", then XOR with "xor". Write this OR'd with
-	 * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
-	 */
-
-	uint32_t srcreg = ROM32(bios->data[offset + 1]);
-	uint8_t shift = bios->data[offset + 5];
-	uint32_t srcmask = ROM32(bios->data[offset + 6]);
-	uint32_t xor = ROM32(bios->data[offset + 10]);
-	uint32_t dstreg = ROM32(bios->data[offset + 14]);
-	uint32_t dstmask = ROM32(bios->data[offset + 18]);
-	uint32_t srcvalue, dstvalue;
-
-	if (!iexec->execute)
-		return 22;
-
-	BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
-		      "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
-		offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
-
-	srcvalue = bios_rd32(bios, srcreg);
-
-	if (shift < 0x80)
-		srcvalue >>= shift;
-	else
-		srcvalue <<= (0x100 - shift);
-
-	srcvalue = (srcvalue & srcmask) ^ xor;
-
-	dstvalue = bios_rd32(bios, dstreg) & dstmask;
-
-	bios_wr32(bios, dstreg, dstvalue | srcvalue);
-
-	return 22;
-}
-
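The shift byte above encodes direction as well as distance: values below 0x80 shift right, anything else shifts left by the two's-complement magnitude (0x100 - shift). The same convention appears again in init_96(). A minimal sketch of the decode:

#include <stdint.h>
#include <stdio.h>

/* Decode the "signed shift" byte used by INIT_COPY_NV_REG and INIT_96. */
static uint32_t apply_shift(uint32_t value, uint8_t shift)
{
	if (shift < 0x80)
		return value >> shift;		/* positive: shift right */
	return value << (0x100 - shift);	/* negative: shift left  */
}

int main(void)
{
	printf("0x%08X 0x%08X\n",
	       apply_shift(0x12345678, 0x04),	/* right by 4 -> 0x01234567 */
	       apply_shift(0x12345678, 0xFC));	/* left by 4  -> 0x23456780 */
	return 0;
}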
-static int
-init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_INDEX_IO   opcode: 0x62 ('b')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): CRTC port
-	 * offset + 3  (8  bit): CRTC index
-	 * offset + 4  (8  bit): data
-	 *
-	 * Write "data" to index "CRTC index" of "CRTC port"
-	 */
-	uint16_t crtcport = ROM16(bios->data[offset + 1]);
-	uint8_t crtcindex = bios->data[offset + 3];
-	uint8_t data = bios->data[offset + 4];
-
-	if (!iexec->execute)
-		return 5;
-
-	bios_idxprt_wr(bios, crtcport, crtcindex, data);
-
-	return 5;
-}
-
-static inline void
-bios_md32(struct nvbios *bios, uint32_t reg,
-	  uint32_t mask, uint32_t val)
-{
-	bios_wr32(bios, reg, (bios_rd32(bios, reg) & ~mask) | val);
-}
-
-static uint32_t
-peek_fb(struct drm_device *dev, struct io_mapping *fb,
-	uint32_t off)
-{
-	uint32_t val = 0;
-
-	if (off < pci_resource_len(dev->pdev, 1)) {
-		uint8_t __iomem *p =
-			io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
-
-		val = ioread32(p + (off & ~PAGE_MASK));
-
-		io_mapping_unmap_atomic(p);
-	}
-
-	return val;
-}
-
-static void
-poke_fb(struct drm_device *dev, struct io_mapping *fb,
-	uint32_t off, uint32_t val)
-{
-	if (off < pci_resource_len(dev->pdev, 1)) {
-		uint8_t __iomem *p =
-			io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
-
-		iowrite32(val, p + (off & ~PAGE_MASK));
-		wmb();
-
-		io_mapping_unmap_atomic(p);
-	}
-}
-
-static inline bool
-read_back_fb(struct drm_device *dev, struct io_mapping *fb,
-	     uint32_t off, uint32_t val)
-{
-	poke_fb(dev, fb, off, val);
-	return val == peek_fb(dev, fb, off);
-}
-
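The peek/poke/read-back helpers above feed the RAM sizing code that follows: write a pattern beyond where memory might end and see whether it shows up mirrored at a lower address, or fails to read back at all. A toy model of the mirroring case, with sizes invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Toy aperture backed by fewer words than it advertises, so writes past
 * the populated range wrap (mirror) onto low addresses -- the symptom the
 * sizing code below looks for. */
#define POPULATED_WORDS 8

static uint32_t vram[POPULATED_WORDS];

static void poke(unsigned word, uint32_t val)
{
	vram[word % POPULATED_WORDS] = val;
}

static uint32_t peek(unsigned word)
{
	return vram[word % POPULATED_WORDS];
}

int main(void)
{
	uint32_t patt = 0xdeadbeef;

	poke(0, patt);				/* marker at the bottom of memory  */
	poke(POPULATED_WORDS, patt + 1);	/* write just past the populated end */

	if (peek(0) == patt + 1)
		printf("upper write aliased to 0: less memory than advertised\n");
	else
		printf("no aliasing: the whole aperture is populated\n");
	return 0;
}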
-static int
-nv04_init_compute_mem(struct nvbios *bios)
-{
-	struct drm_device *dev = bios->dev;
-	uint32_t patt = 0xdeadbeef;
-	struct io_mapping *fb;
-	int i;
-
-	/* Map the framebuffer aperture */
-	fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
-				  pci_resource_len(dev->pdev, 1));
-	if (!fb)
-		return -ENOMEM;
-
-	/* Sequencer and refresh off */
-	NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
-	bios_md32(bios, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
-
-	bios_md32(bios, NV04_PFB_BOOT_0, ~0,
-		  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
-		  NV04_PFB_BOOT_0_RAM_WIDTH_128 |
-		  NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
-
-	for (i = 0; i < 4; i++)
-		poke_fb(dev, fb, 4 * i, patt);
-
-	poke_fb(dev, fb, 0x400000, patt + 1);
-
-	if (peek_fb(dev, fb, 0) == patt + 1) {
-		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
-			  NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
-		bios_md32(bios, NV04_PFB_DEBUG_0,
-			  NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
-
-		for (i = 0; i < 4; i++)
-			poke_fb(dev, fb, 4 * i, patt);
-
-		if ((peek_fb(dev, fb, 0xc) & 0xffff) != (patt & 0xffff))
-			bios_md32(bios, NV04_PFB_BOOT_0,
-				  NV04_PFB_BOOT_0_RAM_WIDTH_128 |
-				  NV04_PFB_BOOT_0_RAM_AMOUNT,
-				  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
-
-	} else if ((peek_fb(dev, fb, 0xc) & 0xffff0000) !=
-		   (patt & 0xffff0000)) {
-		bios_md32(bios, NV04_PFB_BOOT_0,
-			  NV04_PFB_BOOT_0_RAM_WIDTH_128 |
-			  NV04_PFB_BOOT_0_RAM_AMOUNT,
-			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
-
-	} else if (peek_fb(dev, fb, 0) != patt) {
-		if (read_back_fb(dev, fb, 0x800000, patt))
-			bios_md32(bios, NV04_PFB_BOOT_0,
-				  NV04_PFB_BOOT_0_RAM_AMOUNT,
-				  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
-		else
-			bios_md32(bios, NV04_PFB_BOOT_0,
-				  NV04_PFB_BOOT_0_RAM_AMOUNT,
-				  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
-
-		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
-			  NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
-
-	} else if (!read_back_fb(dev, fb, 0x800000, patt)) {
-		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
-			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
-
-	}
-
-	/* Refresh on, sequencer on */
-	bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
-	NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);
-
-	io_mapping_free(fb);
-	return 0;
-}
-
-static const uint8_t *
-nv05_memory_config(struct nvbios *bios)
-{
-	/* Defaults for BIOSes lacking a memory config table */
-	static const uint8_t default_config_tab[][2] = {
-		{ 0x24, 0x00 },
-		{ 0x28, 0x00 },
-		{ 0x24, 0x01 },
-		{ 0x1f, 0x00 },
-		{ 0x0f, 0x00 },
-		{ 0x17, 0x00 },
-		{ 0x06, 0x00 },
-		{ 0x00, 0x00 }
-	};
-	int i = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) &
-		 NV_PEXTDEV_BOOT_0_RAMCFG) >> 2;
-
-	if (bios->legacy.mem_init_tbl_ptr)
-		return &bios->data[bios->legacy.mem_init_tbl_ptr + 2 * i];
-	else
-		return default_config_tab[i];
-}
-
-static int
-nv05_init_compute_mem(struct nvbios *bios)
-{
-	struct drm_device *dev = bios->dev;
-	const uint8_t *ramcfg = nv05_memory_config(bios);
-	uint32_t patt = 0xdeadbeef;
-	struct io_mapping *fb;
-	int i, v;
-
-	/* Map the framebuffer aperture */
-	fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
-				  pci_resource_len(dev->pdev, 1));
-	if (!fb)
-		return -ENOMEM;
-
-	/* Sequencer off */
-	NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
-
-	if (bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
-		goto out;
-
-	bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
-
-	/* If present load the hardcoded scrambling table */
-	if (bios->legacy.mem_init_tbl_ptr) {
-		uint32_t *scramble_tab = (uint32_t *)&bios->data[
-			bios->legacy.mem_init_tbl_ptr + 0x10];
-
-		for (i = 0; i < 8; i++)
-			bios_wr32(bios, NV04_PFB_SCRAMBLE(i),
-				  ROM32(scramble_tab[i]));
-	}
-
-	/* Set memory type/width/length defaults depending on the straps */
-	bios_md32(bios, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
-
-	if (ramcfg[1] & 0x80)
-		bios_md32(bios, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
-
-	bios_md32(bios, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
-	bios_md32(bios, NV04_PFB_CFG1, 0, 1);
-
-	/* Probe memory bus width */
-	for (i = 0; i < 4; i++)
-		poke_fb(dev, fb, 4 * i, patt);
-
-	if (peek_fb(dev, fb, 0xc) != patt)
-		bios_md32(bios, NV04_PFB_BOOT_0,
-			  NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
-
-	/* Probe memory length */
-	v = bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
-
-	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
-	    (!read_back_fb(dev, fb, 0x1000000, ++patt) ||
-	     !read_back_fb(dev, fb, 0, ++patt)))
-		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
-			  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
-
-	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
-	    !read_back_fb(dev, fb, 0x800000, ++patt))
-		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
-			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
-
-	if (!read_back_fb(dev, fb, 0x400000, ++patt))
-		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
-			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
-
-out:
-	/* Sequencer on */
-	NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);
-
-	io_mapping_free(fb);
-	return 0;
-}
-
-static int
-nv10_init_compute_mem(struct nvbios *bios)
-{
-	struct drm_device *dev = bios->dev;
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	const int mem_width[] = { 0x10, 0x00, 0x20 };
-	const int mem_width_count = (dev_priv->chipset >= 0x17 ? 3 : 2);
-	uint32_t patt = 0xdeadbeef;
-	struct io_mapping *fb;
-	int i, j, k;
-
-	/* Map the framebuffer aperture */
-	fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
-				  pci_resource_len(dev->pdev, 1));
-	if (!fb)
-		return -ENOMEM;
-
-	bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
-
-	/* Probe memory bus width */
-	for (i = 0; i < mem_width_count; i++) {
-		bios_md32(bios, NV04_PFB_CFG0, 0x30, mem_width[i]);
-
-		for (j = 0; j < 4; j++) {
-			for (k = 0; k < 4; k++)
-				poke_fb(dev, fb, 0x1c, 0);
-
-			poke_fb(dev, fb, 0x1c, patt);
-			poke_fb(dev, fb, 0x3c, 0);
-
-			if (peek_fb(dev, fb, 0x1c) == patt)
-				goto mem_width_found;
-		}
-	}
-
-mem_width_found:
-	patt <<= 1;
-
-	/* Probe amount of installed memory */
-	for (i = 0; i < 4; i++) {
-		int off = bios_rd32(bios, NV04_PFB_FIFO_DATA) - 0x100000;
-
-		poke_fb(dev, fb, off, patt);
-		poke_fb(dev, fb, 0, 0);
-
-		peek_fb(dev, fb, 0);
-		peek_fb(dev, fb, 0);
-		peek_fb(dev, fb, 0);
-		peek_fb(dev, fb, 0);
-
-		if (peek_fb(dev, fb, off) == patt)
-			goto amount_found;
-	}
-
-	/* IC missing - disable the upper half memory space. */
-	bios_md32(bios, NV04_PFB_CFG0, 0x1000, 0);
-
-amount_found:
-	io_mapping_free(fb);
-	return 0;
-}
-
-static int
-nv20_init_compute_mem(struct nvbios *bios)
-{
-	struct drm_device *dev = bios->dev;
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	uint32_t mask = (dev_priv->chipset >= 0x25 ? 0x300 : 0x900);
-	uint32_t amount, off;
-	struct io_mapping *fb;
-
-	/* Map the framebuffer aperture */
-	fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
-				  pci_resource_len(dev->pdev, 1));
-	if (!fb)
-		return -ENOMEM;
-
-	bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
-
-	/* Allow full addressing */
-	bios_md32(bios, NV04_PFB_CFG0, 0, mask);
-
-	amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
-	for (off = amount; off > 0x2000000; off -= 0x2000000)
-		poke_fb(dev, fb, off - 4, off);
-
-	amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
-	if (amount != peek_fb(dev, fb, amount - 4))
-		/* IC missing - disable the upper half memory space. */
-		bios_md32(bios, NV04_PFB_CFG0, mask, 0);
-
-	io_mapping_free(fb);
-	return 0;
-}
-
-static int
-init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_COMPUTE_MEM   opcode: 0x63 ('c')
-	 *
-	 * offset      (8 bit): opcode
-	 *
-	 * This opcode is meant to set the PFB memory config registers
-	 * appropriately so that we can correctly calculate how much VRAM it
-	 * has (on nv10 and better chipsets the amount of installed VRAM is
-	 * subsequently reported in NV_PFB_CSTATUS (0x10020C)).
-	 *
-	 * The implementation of this opcode in general consists of several
-	 * parts:
-	 *
-	 * 1) Determination of memory type and density. Only necessary for
-	 *    really old chipsets, the memory type reported by the strap bits
-	 *    (0x101000) is assumed to be accurate on nv05 and newer.
-	 *
-	 * 2) Determination of the memory bus width. Usually done by a cunning
-	 *    combination of writes to offsets 0x1c and 0x3c in the fb, and
-	 *    seeing whether the written values are read back correctly.
-	 *
-	 *    Only necessary on nv0x-nv1x and nv34, on the other cards we can
-	 *    trust the straps.
-	 *
-	 * 3) Determination of how many of the card's RAM pads have ICs
-	 *    attached, usually done by a cunning combination of writes to an
-	 *    offset slightly less than the maximum memory reported by
-	 *    NV_PFB_CSTATUS, then seeing if the test pattern can be read back.
-	 *
-	 * This appears to be a NOP on IGPs and NV4x or newer chipsets, both io
-	 * logs of the VBIOS and kmmio traces of the binary driver POSTing the
-	 * card show nothing being done for this opcode. Why is it still listed
-	 * in the table?!
-	 */
-
-	/* no iexec->execute check by design */
-
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	int ret;
-
-	if (dev_priv->chipset >= 0x40 ||
-	    dev_priv->chipset == 0x1a ||
-	    dev_priv->chipset == 0x1f)
-		ret = 0;
-	else if (dev_priv->chipset >= 0x20 &&
-		 dev_priv->chipset != 0x34)
-		ret = nv20_init_compute_mem(bios);
-	else if (dev_priv->chipset >= 0x10)
-		ret = nv10_init_compute_mem(bios);
-	else if (dev_priv->chipset >= 0x5)
-		ret = nv05_init_compute_mem(bios);
-	else
-		ret = nv04_init_compute_mem(bios);
-
-	if (ret)
-		return ret;
-
-	return 1;
-}
-
-static int
-init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_RESET   opcode: 0x65 ('e')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): register
-	 * offset + 5  (32 bit): value1
-	 * offset + 9  (32 bit): value2
-	 *
-	 * Assign "value1" to "register", then assign "value2" to "register"
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint32_t value1 = ROM32(bios->data[offset + 5]);
-	uint32_t value2 = ROM32(bios->data[offset + 9]);
-	uint32_t pci_nv_19, pci_nv_20;
-
-	/* no iexec->execute check by design */
-
-	pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
-	bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19 & ~0xf00);
-
-	bios_wr32(bios, reg, value1);
-
-	udelay(10);
-
-	bios_wr32(bios, reg, value2);
-	bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
-
-	pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
-	pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED;	/* 0xfffffffe */
-	bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
-
-	return 13;
-}
-
-static int
-init_configure_mem(struct nvbios *bios, uint16_t offset,
-		   struct init_exec *iexec)
-{
-	/*
-	 * INIT_CONFIGURE_MEM   opcode: 0x66 ('f')
-	 *
-	 * offset      (8 bit): opcode
-	 *
-	 * Equivalent to INIT_DONE on bios version 3 or greater.
-	 * For early bios versions, sets up the memory registers, using values
-	 * taken from the memory init table
-	 */
-
-	/* no iexec->execute check by design */
-
-	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr +
-		MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR,
-						NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
-	uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr;
-	uint16_t meminitdata = meminitoffs + 6;
-	uint32_t reg, data;
-
-	if (bios->major_version > 2)
-		return 0;
-
-	bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
-		       bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
-
-	if (bios->data[meminitoffs] & 1)
-		seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;
-
-	for (reg = ROM32(bios->data[seqtbloffs]);
-	     reg != 0xffffffff;
-	     reg = ROM32(bios->data[seqtbloffs += 4])) {
-
-		switch (reg) {
-		case NV04_PFB_PRE:
-			data = NV04_PFB_PRE_CMD_PRECHARGE;
-			break;
-		case NV04_PFB_PAD:
-			data = NV04_PFB_PAD_CKE_NORMAL;
-			break;
-		case NV04_PFB_REF:
-			data = NV04_PFB_REF_CMD_REFRESH;
-			break;
-		default:
-			data = ROM32(bios->data[meminitdata]);
-			meminitdata += 4;
-			if (data == 0xffffffff)
-				continue;
-		}
-
-		bios_wr32(bios, reg, data);
-	}
-
-	return 1;
-}
-
-static int
-init_configure_clk(struct nvbios *bios, uint16_t offset,
-		   struct init_exec *iexec)
-{
-	/*
-	 * INIT_CONFIGURE_CLK   opcode: 0x67 ('g')
-	 *
-	 * offset      (8 bit): opcode
-	 *
-	 * Equivalent to INIT_DONE on bios version 3 or greater.
-	 * For early bios versions, sets up the NVClk and MClk PLLs, using
-	 * values taken from the memory init table
-	 */
-
-	/* no iexec->execute check by design */
-
-	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr +
-		MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR,
-						NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
-	int clock;
-
-	if (bios->major_version > 2)
-		return 0;
-
-	clock = ROM16(bios->data[meminitoffs + 4]) * 10;
-	setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
-
-	clock = ROM16(bios->data[meminitoffs + 2]) * 10;
-	if (bios->data[meminitoffs] & 1) /* DDR */
-		clock *= 2;
-	setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
-
-	return 1;
-}
-
-static int
-init_configure_preinit(struct nvbios *bios, uint16_t offset,
-		       struct init_exec *iexec)
-{
-	/*
-	 * INIT_CONFIGURE_PREINIT   opcode: 0x68 ('h')
-	 *
-	 * offset      (8 bit): opcode
-	 *
-	 * Equivalent to INIT_DONE on bios version 3 or greater.
-	 * For early bios versions, does early init, loading ram and crystal
-	 * configuration from straps into CR3C
-	 */
-
-	/* no iexec->execute check by design */
-
-	uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
-	uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & 0x40) >> 6;
-
-	if (bios->major_version > 2)
-		return 0;
-
-	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
-			     NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
-
-	return 1;
-}
-
-static int
-init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_IO   opcode: 0x69 ('i')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): CRTC port
-	 * offset + 3  (8  bit): mask
-	 * offset + 4  (8  bit): data
-	 *
-	 * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
-	 */
-
-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
-	uint16_t crtcport = ROM16(bios->data[offset + 1]);
-	uint8_t mask = bios->data[offset + 3];
-	uint8_t data = bios->data[offset + 4];
-
-	if (!iexec->execute)
-		return 5;
-
-	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
-		offset, crtcport, mask, data);
-
-	/*
-	 * I have no idea what this does, but NVIDIA do this magic sequence
-	 * in the places where this INIT_IO happens...
-	 */
-	if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
-		int i;
-
-		bios_wr32(bios, 0x614100, (bios_rd32(
-			  bios, 0x614100) & 0x0fffffff) | 0x00800000);
-
-		bios_wr32(bios, 0x00e18c, bios_rd32(
-			  bios, 0x00e18c) | 0x00020000);
-
-		bios_wr32(bios, 0x614900, (bios_rd32(
-			  bios, 0x614900) & 0x0fffffff) | 0x00800000);
-
-		bios_wr32(bios, 0x000200, bios_rd32(
-			  bios, 0x000200) & ~0x40000000);
-
-		mdelay(10);
-
-		bios_wr32(bios, 0x00e18c, bios_rd32(
-			  bios, 0x00e18c) & ~0x00020000);
-
-		bios_wr32(bios, 0x000200, bios_rd32(
-			  bios, 0x000200) | 0x40000000);
-
-		bios_wr32(bios, 0x614100, 0x00800018);
-		bios_wr32(bios, 0x614900, 0x00800018);
-
-		mdelay(10);
-
-		bios_wr32(bios, 0x614100, 0x10000018);
-		bios_wr32(bios, 0x614900, 0x10000018);
-
-		for (i = 0; i < 3; i++)
-			bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
-				  bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);
-
-		for (i = 0; i < 2; i++)
-			bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
-				  bios, 0x614300 + (i*0x800)) & 0xfffff0f0);
-
-		for (i = 0; i < 3; i++)
-			bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
-				  bios, 0x614380 + (i*0x800)) & 0xfffff0f0);
-
-		for (i = 0; i < 2; i++)
-			bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
-				  bios, 0x614200 + (i*0x800)) & 0xfffffff0);
-
-		for (i = 0; i < 2; i++)
-			bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
-				  bios, 0x614108 + (i*0x800)) & 0x0fffffff);
-		return 5;
-	}
-
-	bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
-									data);
-	return 5;
-}
-
-static int
-init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_SUB   opcode: 0x6B ('k')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): script number
-	 *
-	 * Execute script number "script number", as a subroutine
-	 */
-
-	uint8_t sub = bios->data[offset + 1];
-
-	if (!iexec->execute)
-		return 2;
-
-	BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
-
-	parse_init_table(bios,
-			 ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
-			 iexec);
-
-	BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
-
-	return 2;
-}
-
-static int
-init_ram_condition(struct nvbios *bios, uint16_t offset,
-		   struct init_exec *iexec)
-{
-	/*
-	 * INIT_RAM_CONDITION   opcode: 0x6D ('m')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): mask
-	 * offset + 2  (8 bit): cmpval
-	 *
-	 * Test if (NV04_PFB_BOOT_0 & "mask") equals "cmpval".
-	 * If condition not met skip subsequent opcodes until condition is
-	 * inverted (INIT_NOT), or we hit INIT_RESUME
-	 */
-
-	uint8_t mask = bios->data[offset + 1];
-	uint8_t cmpval = bios->data[offset + 2];
-	uint8_t data;
-
-	if (!iexec->execute)
-		return 3;
-
-	data = bios_rd32(bios, NV04_PFB_BOOT_0) & mask;
-
-	BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
-		offset, data, cmpval);
-
-	if (data == cmpval)
-		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
-	else {
-		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
-		iexec->execute = false;
-	}
-
-	return 3;
-}
-
-static int
-init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_NV_REG   opcode: 0x6E ('n')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): register
-	 * offset + 5  (32 bit): mask
-	 * offset + 9  (32 bit): data
-	 *
-	 * Assign ((REGVAL("register") & "mask") | "data") to "register"
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint32_t mask = ROM32(bios->data[offset + 5]);
-	uint32_t data = ROM32(bios->data[offset + 9]);
-
-	if (!iexec->execute)
-		return 13;
-
-	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
-		offset, reg, mask, data);
-
-	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
-
-	return 13;
-}
-
-static int
-init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_MACRO   opcode: 0x6F ('o')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): macro number
-	 *
-	 * Look up macro index "macro number" in the macro index table.
-	 * The macro index table entry has 1 byte for the index in the macro
-	 * table, and 1 byte for the number of times to repeat the macro.
-	 * The macro table entry has 4 bytes for the register address and
-	 * 4 bytes for the value to write to that register
-	 */
-
-	uint8_t macro_index_tbl_idx = bios->data[offset + 1];
-	uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
-	uint8_t macro_tbl_idx = bios->data[tmp];
-	uint8_t count = bios->data[tmp + 1];
-	uint32_t reg, data;
-	int i;
-
-	if (!iexec->execute)
-		return 2;
-
-	BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
-		      "Count: 0x%02X\n",
-		offset, macro_index_tbl_idx, macro_tbl_idx, count);
-
-	for (i = 0; i < count; i++) {
-		uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
-
-		reg = ROM32(bios->data[macroentryptr]);
-		data = ROM32(bios->data[macroentryptr + 4]);
-
-		bios_wr32(bios, reg, data);
-	}
-
-	return 2;
-}
-
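The macro lookup above goes through two tables: a 2-byte index-table entry selects a starting row and repeat count, and each 8-byte macro-table row holds a register/value pair, matching MACRO_INDEX_SIZE and MACRO_SIZE. A sketch of the address arithmetic with hypothetical table pointers and operands:

#include <stdio.h>

#define MACRO_INDEX_SIZE 2	/* index entry: macro index + repeat count     */
#define MACRO_SIZE       8	/* macro entry: 32-bit register + 32-bit value */

int main(void)
{
	/* Hypothetical pointers and operands, only to show the two-level
	 * address arithmetic performed by init_macro(). */
	unsigned macro_index_tbl_ptr = 0x1000;
	unsigned macro_tbl_ptr = 0x1100;
	unsigned macro_index_tbl_idx = 3;	/* from the opcode           */
	unsigned macro_tbl_idx = 5;		/* read from the index entry */
	unsigned count = 2;			/* read from the index entry */

	printf("index entry at 0x%04X\n",
	       macro_index_tbl_ptr + macro_index_tbl_idx * MACRO_INDEX_SIZE);

	for (unsigned i = 0; i < count; i++) {
		unsigned entry = macro_tbl_ptr +
				 (macro_tbl_idx + i) * MACRO_SIZE;

		printf("macro %u: register at 0x%04X, value at 0x%04X\n",
		       i, entry, entry + 4);
	}
	return 0;
}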
-static int
-init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_DONE   opcode: 0x71 ('q')
-	 *
-	 * offset      (8  bit): opcode
-	 *
-	 * End the current script
-	 */
-
-	/* mild retval abuse to stop parsing this table */
-	return 0;
-}
-
-static int
-init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_RESUME   opcode: 0x72 ('r')
-	 *
-	 * offset      (8  bit): opcode
-	 *
-	 * End the current execute / no-execute condition
-	 */
-
-	if (iexec->execute)
-		return 1;
-
-	iexec->execute = true;
-	BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
-
-	return 1;
-}
-
-static int
-init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_TIME   opcode: 0x74 ('t')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): time
-	 *
-	 * Sleep for "time" microseconds.
-	 */
-
-	unsigned time = ROM16(bios->data[offset + 1]);
-
-	if (!iexec->execute)
-		return 3;
-
-	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
-		offset, time);
-
-	if (time < 1000)
-		udelay(time);
-	else
-		mdelay((time + 900) / 1000);
-
-	return 3;
-}
-
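The split above keeps short waits as busy microsecond delays and converts anything of a millisecond or more into mdelay(), rounding up once the leftover fraction reaches 100 us. A quick sketch of how a few arbitrary example delays map:

#include <stdio.h>

int main(void)
{
	/* Arbitrary example delays in microseconds. */
	unsigned examples[] = { 50, 999, 1000, 1050, 1100, 2500 };

	for (unsigned i = 0; i < sizeof(examples) / sizeof(examples[0]); i++) {
		unsigned us = examples[i];

		if (us < 1000)
			printf("%4u us -> udelay(%u)\n", us, us);
		else
			printf("%4u us -> mdelay(%u)\n", us, (us + 900) / 1000);
	}
	return 0;
}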
-static int
-init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_CONDITION   opcode: 0x75 ('u')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): condition number
-	 *
-	 * Check condition "condition number" in the condition table.
-	 * If condition not met skip subsequent opcodes until condition is
-	 * inverted (INIT_NOT), or we hit INIT_RESUME
-	 */
-
-	uint8_t cond = bios->data[offset + 1];
-
-	if (!iexec->execute)
-		return 2;
-
-	BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
-
-	if (bios_condition_met(bios, offset, cond))
-		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
-	else {
-		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
-		iexec->execute = false;
-	}
-
-	return 2;
-}
-
-static int
-init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_IO_CONDITION  opcode: 0x76
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): condition number
-	 *
-	 * Check condition "condition number" in the io condition table.
-	 * If condition not met skip subsequent opcodes until condition is
-	 * inverted (INIT_NOT), or we hit INIT_RESUME
-	 */
-
-	uint8_t cond = bios->data[offset + 1];
-
-	if (!iexec->execute)
-		return 2;
-
-	BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
-
-	if (io_condition_met(bios, offset, cond))
-		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
-	else {
-		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
-		iexec->execute = false;
-	}
-
-	return 2;
-}
-
-static int
-init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_INDEX_IO   opcode: 0x78 ('x')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (16 bit): CRTC port
-	 * offset + 3  (8  bit): CRTC index
-	 * offset + 4  (8  bit): mask
-	 * offset + 5  (8  bit): data
-	 *
-	 * Read value at index "CRTC index" on "CRTC port", AND with "mask",
-	 * OR with "data", write-back
-	 */
-
-	uint16_t crtcport = ROM16(bios->data[offset + 1]);
-	uint8_t crtcindex = bios->data[offset + 3];
-	uint8_t mask = bios->data[offset + 4];
-	uint8_t data = bios->data[offset + 5];
-	uint8_t value;
-
-	if (!iexec->execute)
-		return 6;
-
-	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
-		      "Data: 0x%02X\n",
-		offset, crtcport, crtcindex, mask, data);
-
-	value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
-	bios_idxprt_wr(bios, crtcport, crtcindex, value);
-
-	return 6;
-}
-
-static int
-init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_PLL   opcode: 0x79 ('y')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): register
-	 * offset + 5  (16 bit): freq
-	 *
-	 * Set PLL register "register" to coefficients for frequency "freq",
-	 * which is given in units of 10 kHz
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint16_t freq = ROM16(bios->data[offset + 5]);
-
-	if (!iexec->execute)
-		return 7;
-
-	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
-
-	setPLL(bios, reg, freq * 10);
-
-	return 7;
-}
-
-static int
-init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_REG   opcode: 0x7A ('z')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): register
-	 * offset + 5  (32 bit): value
-	 *
-	 * Assign "value" to "register"
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint32_t value = ROM32(bios->data[offset + 5]);
-
-	if (!iexec->execute)
-		return 9;
-
-	if (reg == 0x000200)
-		value |= 1;
-
-	bios_wr32(bios, reg, value);
-
-	return 9;
-}
-
-static int
-init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
-		      struct init_exec *iexec)
-{
-	/*
-	 * INIT_RAM_RESTRICT_PLL   opcode: 0x87 ('')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): PLL type
-	 * offset + 2 (32 bit): frequency 0
-	 *
-	 * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
-	 * ram_restrict_table_ptr.  The value read from there is used to select
-	 * a frequency from the table starting at 'frequency 0' to be
-	 * programmed into the PLL corresponding to 'type'.
-	 *
-	 * The PLL limits table on cards using this opcode has a mapping of
-	 * 'type' to the relevant registers.
-	 */
-
-	struct drm_device *dev = bios->dev;
-	uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
-	uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
-	uint8_t type = bios->data[offset + 1];
-	uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
-	uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
-	int len = 2 + bios->ram_restrict_group_count * 4;
-	int i;
-
-	if (!iexec->execute)
-		return len;
-
-	if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
-		NV_ERROR(dev, "PLL limits table not version 3.x\n");
-		return len; /* deliberate, allow default clocks to remain */
-	}
-
-	entry = pll_limits + pll_limits[1];
-	for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
-		if (entry[0] == type) {
-			uint32_t reg = ROM32(entry[3]);
-
-			BIOSLOG(bios, "0x%04X: "
-				      "Type %02x Reg 0x%08x Freq %dkHz\n",
-				offset, type, reg, freq);
-
-			setPLL(bios, reg, freq);
-			return len;
-		}
-	}
-
-	NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table\n", type);
-	return len;
-}
-
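The strap extraction above pulls bits 5:2 of PEXTDEV_BOOT_0 and uses them to index the ram_restrict table, which in turn selects the frequency column. A sketch of that two-step lookup with an invented strap value and table:

#include <stdio.h>

int main(void)
{
	/* Hypothetical PEXTDEV_BOOT_0 value; bits 5:2 form the RAMCFG strap. */
	unsigned boot0 = 0x0000012c;
	unsigned strap = (boot0 & 0x0000003c) >> 2;

	/* Invented ram_restrict table mapping the 16 possible straps onto a
	 * smaller set of frequency groups. */
	unsigned char ram_restrict_tbl[16] = { 0, 0, 1, 1, 2, 2, 0, 1,
					       2, 0, 1, 2, 0, 1, 2, 0 };
	unsigned group = ram_restrict_tbl[strap];

	/* The PLL frequency for this group sits at offset + 2 + group * 4. */
	printf("strap %u -> group %u -> frequency at opcode offset +%u\n",
	       strap, group, 2 + group * 4);
	return 0;
}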
-static int
-init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_8C   opcode: 0x8C ('')
-	 *
-	 * NOP so far....
-	 *
-	 */
-
-	return 1;
-}
-
-static int
-init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_8D   opcode: 0x8D ('')
-	 *
-	 * NOP so far....
-	 *
-	 */
-
-	return 1;
-}
-
-static int
-init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_GPIO   opcode: 0x8E ('')
-	 *
-	 * offset      (8 bit): opcode
-	 *
-	 * Loop over all entries in the DCB GPIO table, and initialise
-	 * each GPIO according to various values listed in each entry
-	 */
-
-	if (iexec->execute && bios->execute)
-		nouveau_gpio_reset(bios->dev);
-
-	return 1;
-}
-
-static int
-init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
-			       struct init_exec *iexec)
-{
-	/*
-	 * INIT_RAM_RESTRICT_ZM_REG_GROUP   opcode: 0x8F ('')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): reg
-	 * offset + 5  (8  bit): regincrement
-	 * offset + 6  (8  bit): count
-	 * offset + 7  (32 bit): value 1,1
-	 * ...
-	 *
-	 * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
-	 * ram_restrict_table_ptr. The value read from here is 'n', and
-	 * "value 1,n" gets written to "reg". This repeats "count" times and on
-	 * each iteration 'm', "reg" increases by "regincrement" and
-	 * "value m,n" is used. The extent of n is limited by a number read
-	 * from the 'M' BIT table, herein called "blocklen"
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint8_t regincrement = bios->data[offset + 5];
-	uint8_t count = bios->data[offset + 6];
-	uint32_t strap_ramcfg, data;
-	/* previously set by 'M' BIT table */
-	uint16_t blocklen = bios->ram_restrict_group_count * 4;
-	int len = 7 + count * blocklen;
-	uint8_t index;
-	int i;
-
-	/* blocklen is critical: without it we cannot know this opcode's length */
-	if (!blocklen) {
-		NV_ERROR(bios->dev,
-			 "0x%04X: Zero block length - has the M table "
-			 "been parsed?\n", offset);
-		return -EINVAL;
-	}
-
-	if (!iexec->execute)
-		return len;
-
-	strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
-	index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
-
-	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
-		      "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
-		offset, reg, regincrement, count, strap_ramcfg, index);
-
-	for (i = 0; i < count; i++) {
-		data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);
-
-		bios_wr32(bios, reg, data);
-
-		reg += regincrement;
-	}
-
-	return len;
-}
-
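Each of the "count" rows written by the handler above is blocklen bytes wide, and the RAMCFG-derived index picks one 32-bit column out of every row, giving the offset + 7 + index * 4 + blocklen * i addressing. A short sketch of those byte offsets with invented operands:

#include <stdio.h>

int main(void)
{
	/* Invented operands for an INIT_RAM_RESTRICT_ZM_REG_GROUP opcode. */
	unsigned offset = 0x0200;	/* opcode position in the image      */
	unsigned count = 3;		/* number of registers written       */
	unsigned index = 2;		/* column chosen by the RAMCFG strap */
	unsigned blocklen = 4 * 4;	/* ram_restrict_group_count == 4     */

	for (unsigned i = 0; i < count; i++)
		printf("row %u: 32-bit value read from 0x%04X\n",
		       i, offset + 7 + index * 4 + blocklen * i);

	printf("opcode length: %u bytes\n", 7 + count * blocklen);
	return 0;
}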
-static int
-init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_COPY_ZM_REG   opcode: 0x90 ('')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): src reg
-	 * offset + 5  (32 bit): dst reg
-	 *
-	 * Put contents of "src reg" into "dst reg"
-	 */
-
-	uint32_t srcreg = ROM32(bios->data[offset + 1]);
-	uint32_t dstreg = ROM32(bios->data[offset + 5]);
-
-	if (!iexec->execute)
-		return 9;
-
-	bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
-
-	return 9;
-}
-
-static int
-init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
-			       struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_REG_GROUP_ADDRESS_LATCHED   opcode: 0x91 ('')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): dst reg
-	 * offset + 5  (8  bit): count
-	 * offset + 6  (32 bit): data 1
-	 * ...
-	 *
-	 * For each of "count" values write "data n" to "dst reg"
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint8_t count = bios->data[offset + 5];
-	int len = 6 + count * 4;
-	int i;
-
-	if (!iexec->execute)
-		return len;
-
-	for (i = 0; i < count; i++) {
-		uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
-		bios_wr32(bios, reg, data);
-	}
-
-	return len;
-}
-
-static int
-init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_RESERVED   opcode: 0x92 ('')
-	 *
-	 * offset      (8 bit): opcode
-	 *
-	 * Seemingly does nothing
-	 */
-
-	return 1;
-}
-
-static int
-init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_96   opcode: 0x96 ('')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): sreg
-	 * offset + 5  (8  bit): sshift
-	 * offset + 6  (8  bit): smask
-	 * offset + 7  (8  bit): index
-	 * offset + 8  (32 bit): reg
-	 * offset + 12 (32 bit): mask
-	 * offset + 16 (8  bit): shift
-	 *
-	 */
-
-	uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
-	uint32_t reg = ROM32(bios->data[offset + 8]);
-	uint32_t mask = ROM32(bios->data[offset + 12]);
-	uint32_t val;
-
-	val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
-	if (bios->data[offset + 5] < 0x80)
-		val >>= bios->data[offset + 5];
-	else
-		val <<= (0x100 - bios->data[offset + 5]);
-	val &= bios->data[offset + 6];
-
-	val   = bios->data[ROM16(bios->data[xlatptr]) + val];
-	val <<= bios->data[offset + 16];
-
-	if (!iexec->execute)
-		return 17;
-
-	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
-	return 17;
-}
-
-static int
-init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_97   opcode: 0x97 ('')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): register
-	 * offset + 5  (32 bit): mask
-	 * offset + 9  (32 bit): value
-	 *
-	 * Adds "value" to "register" preserving the fields specified
-	 * by "mask"
-	 */
-
-	uint32_t reg = ROM32(bios->data[offset + 1]);
-	uint32_t mask = ROM32(bios->data[offset + 5]);
-	uint32_t add = ROM32(bios->data[offset + 9]);
-	uint32_t val;
-
-	val = bios_rd32(bios, reg);
-	val = (val & mask) | ((val + add) & ~mask);
-
-	if (!iexec->execute)
-		return 13;
-
-	bios_wr32(bios, reg, val);
-	return 13;
-}
-
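The update above adds to a register while leaving the bits selected by "mask" untouched; everything outside the mask takes the incremented value. A minimal sketch of that masked add with example numbers:

#include <stdint.h>
#include <stdio.h>

/* Masked add as used by INIT_97: bits under "mask" keep their old value,
 * all other bits come from (val + add). */
static uint32_t masked_add(uint32_t val, uint32_t mask, uint32_t add)
{
	return (val & mask) | ((val + add) & ~mask);
}

int main(void)
{
	/* Example: preserve the top byte, add 1 to the rest -> 0xAB000100. */
	printf("0x%08X\n", masked_add(0xAB0000FF, 0xFF000000, 0x00000001));
	return 0;
}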
-static int
-init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_AUXCH   opcode: 0x98 ('')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): address
-	 * offset + 5  (8  bit): count
-	 * offset + 6  (8  bit): mask 0
-	 * offset + 7  (8  bit): data 0
-	 *  ...
-	 *
-	 */
-
-	struct drm_device *dev = bios->dev;
-	struct nouveau_i2c_chan *auxch;
-	uint32_t addr = ROM32(bios->data[offset + 1]);
-	uint8_t count = bios->data[offset + 5];
-	int len = 6 + count * 2;
-	int ret, i;
-
-	if (!bios->display.output) {
-		NV_ERROR(dev, "INIT_AUXCH: no active output\n");
-		return len;
-	}
-
-	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
-	if (!auxch) {
-		NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
-			 bios->display.output->i2c_index);
-		return len;
-	}
-
-	if (!iexec->execute)
-		return len;
-
-	offset += 6;
-	for (i = 0; i < count; i++, offset += 2) {
-		uint8_t data;
-
-		ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
-		if (ret) {
-			NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
-			return len;
-		}
-
-		data &= bios->data[offset + 0];
-		data |= bios->data[offset + 1];
-
-		ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
-		if (ret) {
-			NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
-			return len;
-		}
-	}
-
-	return len;
-}
-
-static int
-init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_ZM_AUXCH   opcode: 0x99 ('')
-	 *
-	 * offset      (8  bit): opcode
-	 * offset + 1  (32 bit): address
-	 * offset + 5  (8  bit): count
-	 * offset + 6  (8  bit): data 0
-	 *  ...
-	 *
-	 */
-
-	struct drm_device *dev = bios->dev;
-	struct nouveau_i2c_chan *auxch;
-	uint32_t addr = ROM32(bios->data[offset + 1]);
-	uint8_t count = bios->data[offset + 5];
-	int len = 6 + count;
-	int ret, i;
-
-	if (!bios->display.output) {
-		NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
-		return len;
-	}
-
-	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
-	if (!auxch) {
-		NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
-			 bios->display.output->i2c_index);
-		return len;
-	}
-
-	if (!iexec->execute)
-		return len;
-
-	offset += 6;
-	for (i = 0; i < count; i++, offset++) {
-		ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
-		if (ret) {
-			NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
-			return len;
-		}
-	}
-
-	return len;
-}
-
-static int
-init_i2c_long_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * INIT_I2C_LONG_IF   opcode: 0x9A ('')
-	 *
-	 * offset      (8 bit): opcode
-	 * offset + 1  (8 bit): DCB I2C table entry index
-	 * offset + 2  (8 bit): I2C slave address
-	 * offset + 3  (16 bit): I2C register
-	 * offset + 5  (8 bit): mask
-	 * offset + 6  (8 bit): data
-	 *
-	 * Read the register given by "I2C register" on the device addressed
-	 * by "I2C slave address" on the I2C bus given by "DCB I2C table
-	 * entry index". Compare the result AND "mask" to "data".
-	 * If they're not equal, skip subsequent opcodes until condition is
-	 * inverted (INIT_NOT), or we hit INIT_RESUME
-	 */
-
-	uint8_t i2c_index = bios->data[offset + 1];
-	uint8_t i2c_address = bios->data[offset + 2] >> 1;
-	uint8_t reglo = bios->data[offset + 3];
-	uint8_t reghi = bios->data[offset + 4];
-	uint8_t mask = bios->data[offset + 5];
-	uint8_t data = bios->data[offset + 6];
-	struct nouveau_i2c_chan *chan;
-	uint8_t buf0[2] = { reghi, reglo };
-	uint8_t buf1[1];
-	struct i2c_msg msg[2] = {
-		{ i2c_address, 0, 1, buf0 },
-		{ i2c_address, I2C_M_RD, 1, buf1 },
-	};
-	int ret;
-
-	/* no execute check by design */
-
-	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
-		offset, i2c_index, i2c_address);
-
-	chan = init_i2c_device_find(bios->dev, i2c_index);
-	if (!chan)
-		return -ENODEV;
-
-
-	ret = i2c_transfer(&chan->adapter, msg, 2);
-	if (ret < 0) {
-		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: [no device], "
-			      "Mask: 0x%02X, Data: 0x%02X\n",
-			offset, reghi, reglo, mask, data);
-		iexec->execute = 0;
-		return 7;
-	}
-
-	BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: 0x%02X, "
-		      "Mask: 0x%02X, Data: 0x%02X\n",
-		offset, reghi, reglo, buf1[0], mask, data);
-
-	iexec->execute = ((buf1[0] & mask) == data);
-
-	return 7;
-}
-
-static struct init_tbl_entry itbl_entry[] = {
-	/* command name                       , id  , command handler                 */
-	/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
-	{ "INIT_IO_RESTRICT_PROG"             , 0x32, init_io_restrict_prog           },
-	{ "INIT_REPEAT"                       , 0x33, init_repeat                     },
-	{ "INIT_IO_RESTRICT_PLL"              , 0x34, init_io_restrict_pll            },
-	{ "INIT_END_REPEAT"                   , 0x36, init_end_repeat                 },
-	{ "INIT_COPY"                         , 0x37, init_copy                       },
-	{ "INIT_NOT"                          , 0x38, init_not                        },
-	{ "INIT_IO_FLAG_CONDITION"            , 0x39, init_io_flag_condition          },
-	{ "INIT_DP_CONDITION"                 , 0x3A, init_dp_condition               },
-	{ "INIT_OP_3B"                        , 0x3B, init_op_3b                      },
-	{ "INIT_OP_3C"                        , 0x3C, init_op_3c                      },
-	{ "INIT_INDEX_ADDRESS_LATCHED"        , 0x49, init_idx_addr_latched           },
-	{ "INIT_IO_RESTRICT_PLL2"             , 0x4A, init_io_restrict_pll2           },
-	{ "INIT_PLL2"                         , 0x4B, init_pll2                       },
-	{ "INIT_I2C_BYTE"                     , 0x4C, init_i2c_byte                   },
-	{ "INIT_ZM_I2C_BYTE"                  , 0x4D, init_zm_i2c_byte                },
-	{ "INIT_ZM_I2C"                       , 0x4E, init_zm_i2c                     },
-	{ "INIT_TMDS"                         , 0x4F, init_tmds                       },
-	{ "INIT_ZM_TMDS_GROUP"                , 0x50, init_zm_tmds_group              },
-	{ "INIT_CR_INDEX_ADDRESS_LATCHED"     , 0x51, init_cr_idx_adr_latch           },
-	{ "INIT_CR"                           , 0x52, init_cr                         },
-	{ "INIT_ZM_CR"                        , 0x53, init_zm_cr                      },
-	{ "INIT_ZM_CR_GROUP"                  , 0x54, init_zm_cr_group                },
-	{ "INIT_CONDITION_TIME"               , 0x56, init_condition_time             },
-	{ "INIT_LTIME"                        , 0x57, init_ltime                      },
-	{ "INIT_ZM_REG_SEQUENCE"              , 0x58, init_zm_reg_sequence            },
-	/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
-	{ "INIT_SUB_DIRECT"                   , 0x5B, init_sub_direct                 },
-	{ "INIT_JUMP"                         , 0x5C, init_jump                       },
-	{ "INIT_I2C_IF"                       , 0x5E, init_i2c_if                     },
-	{ "INIT_COPY_NV_REG"                  , 0x5F, init_copy_nv_reg                },
-	{ "INIT_ZM_INDEX_IO"                  , 0x62, init_zm_index_io                },
-	{ "INIT_COMPUTE_MEM"                  , 0x63, init_compute_mem                },
-	{ "INIT_RESET"                        , 0x65, init_reset                      },
-	{ "INIT_CONFIGURE_MEM"                , 0x66, init_configure_mem              },
-	{ "INIT_CONFIGURE_CLK"                , 0x67, init_configure_clk              },
-	{ "INIT_CONFIGURE_PREINIT"            , 0x68, init_configure_preinit          },
-	{ "INIT_IO"                           , 0x69, init_io                         },
-	{ "INIT_SUB"                          , 0x6B, init_sub                        },
-	{ "INIT_RAM_CONDITION"                , 0x6D, init_ram_condition              },
-	{ "INIT_NV_REG"                       , 0x6E, init_nv_reg                     },
-	{ "INIT_MACRO"                        , 0x6F, init_macro                      },
-	{ "INIT_DONE"                         , 0x71, init_done                       },
-	{ "INIT_RESUME"                       , 0x72, init_resume                     },
-	/* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
-	{ "INIT_TIME"                         , 0x74, init_time                       },
-	{ "INIT_CONDITION"                    , 0x75, init_condition                  },
-	{ "INIT_IO_CONDITION"                 , 0x76, init_io_condition               },
-	{ "INIT_INDEX_IO"                     , 0x78, init_index_io                   },
-	{ "INIT_PLL"                          , 0x79, init_pll                        },
-	{ "INIT_ZM_REG"                       , 0x7A, init_zm_reg                     },
-	{ "INIT_RAM_RESTRICT_PLL"             , 0x87, init_ram_restrict_pll           },
-	{ "INIT_8C"                           , 0x8C, init_8c                         },
-	{ "INIT_8D"                           , 0x8D, init_8d                         },
-	{ "INIT_GPIO"                         , 0x8E, init_gpio                       },
-	{ "INIT_RAM_RESTRICT_ZM_REG_GROUP"    , 0x8F, init_ram_restrict_zm_reg_group  },
-	{ "INIT_COPY_ZM_REG"                  , 0x90, init_copy_zm_reg                },
-	{ "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched  },
-	{ "INIT_RESERVED"                     , 0x92, init_reserved                   },
-	{ "INIT_96"                           , 0x96, init_96                         },
-	{ "INIT_97"                           , 0x97, init_97                         },
-	{ "INIT_AUXCH"                        , 0x98, init_auxch                      },
-	{ "INIT_ZM_AUXCH"                     , 0x99, init_zm_auxch                   },
-	{ "INIT_I2C_LONG_IF"                  , 0x9A, init_i2c_long_if                },
-	{ NULL                                , 0   , NULL                            }
-};
-
-#define MAX_TABLE_OPS 1000
-
-static int
-parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
-{
-	/*
-	 * Parses all commands in an init table.
-	 *
-	 * We start out executing all commands found in the init table. Some
-	 * opcodes may change the status of iexec->execute to SKIP, which will
-	 * cause the following opcodes to perform no operation until the value
-	 * is changed back to EXECUTE.
-	 */
-
-	int count = 0, i, ret;
-	uint8_t id;
-
-	/* catch NULL script pointers */
-	if (offset == 0)
-		return 0;
-
-	/*
-	 * Loop until INIT_DONE causes us to break out of the loop
-	 * (or until offset > bios length just in case... )
-	 * (and no more than MAX_TABLE_OPS iterations, just in case... )
-	 */
-	while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
-		id = bios->data[offset];
-
-		/* Find matching id in itbl_entry */
-		for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
-			;
-
-		if (!itbl_entry[i].name) {
-			NV_ERROR(bios->dev,
-				 "0x%04X: Init table command not found: "
-				 "0x%02X\n", offset, id);
-			return -ENOENT;
-		}
-
-		BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
-			itbl_entry[i].id, itbl_entry[i].name);
-
-		/* execute the matching command handler */
-		ret = (*itbl_entry[i].handler)(bios, offset, iexec);
-		if (ret < 0) {
-			NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
-				 "table opcode: %s %d\n", offset,
-				 itbl_entry[i].name, ret);
-		}
-
-		if (ret <= 0)
-			break;
-
-		/*
-		 * Advance offset by the length of the current command,
-		 * including all of its data, so that it ends up pointing
-		 * at the next opcode.
-		 */
-		offset += ret;
-	}
-
-	if (offset >= bios->length)
-		NV_WARN(bios->dev,
-			"Offset 0x%04X greater than known bios image length.  "
-			"Corrupt image?\n", offset);
-	if (count >= MAX_TABLE_OPS)
-		NV_WARN(bios->dev,
-			"More than %d opcodes in a table is unlikely; "
-			"is the bios image corrupt?\n", MAX_TABLE_OPS);
-
-	return 0;
-}
-
-static void
-parse_init_tables(struct nvbios *bios)
-{
-	/* Loops and calls parse_init_table() for each present table. */
-
-	int i = 0;
-	uint16_t table;
-	struct init_exec iexec = {true, false};
-
-	if (bios->old_style_init) {
-		if (bios->init_script_tbls_ptr)
-			parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
-		if (bios->extra_init_script_tbl_ptr)
-			parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
-
-		return;
-	}
-
-	while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
-		NV_INFO(bios->dev,
-			"Parsing VBIOS init table %d at offset 0x%04X\n",
-			i / 2, table);
-		BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
-
-		parse_init_table(bios, table, &iexec);
-		i += 2;
-	}
-}
-
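
The removed code above is a table-driven interpreter: look the opcode id up in itbl_entry[], run its handler, and advance by however many bytes the handler reports it consumed. The fragment below is a minimal standalone sketch of that dispatch pattern with made-up types and opcodes; it is illustrative only and not part of this patch.

/* Illustrative sketch of the init-script dispatch loop; simplified types. */
#include <stdint.h>
#include <stdio.h>

struct script { const uint8_t *data; unsigned length; };

/* A handler returns the opcode's total length in bytes, 0 to stop, <0 on error. */
typedef int (*op_handler)(struct script *s, unsigned offset);

static int op_nop(struct script *s, unsigned offset)  { (void)s; (void)offset; return 1; }
static int op_done(struct script *s, unsigned offset) { (void)s; (void)offset; return 0; }

static const struct { uint8_t id; op_handler handler; } ops[] = {
	{ 0x00, op_nop },
	{ 0x71, op_done },	/* INIT_DONE terminates the table */
};

static int run_script(struct script *s, unsigned offset)
{
	unsigned count = 0;

	while (offset < s->length && count++ < 1000) {
		uint8_t id = s->data[offset];
		op_handler h = NULL;
		unsigned i;
		int ret;

		/* find the matching handler for this opcode id */
		for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
			if (ops[i].id == id) {
				h = ops[i].handler;
				break;
			}
		}
		if (!h)
			return -1;	/* unknown opcode */

		ret = h(s, offset);
		if (ret <= 0)
			return ret;	/* INIT_DONE (0) or error (<0) */
		offset += ret;		/* skip past this opcode and its data */
	}
	return 0;
}

int main(void)
{
	const uint8_t table[] = { 0x00, 0x00, 0x71 };	/* two NOPs, then INIT_DONE */
	struct script s = { table, sizeof(table) };

	printf("script returned %d\n", run_script(&s, 0));
	return 0;
}
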
 static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
 {
 	int compare_record_len, i = 0;
@@ -3764,28 +95,24 @@ static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
 
 static void
 run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
-		      struct dcb_entry *dcbent, int head, bool dl)
+		      struct dcb_output *dcbent, int head, bool dl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct init_exec iexec = {true, false};
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
+	NV_INFO(drm, "0x%04X: Parsing digital output script table\n",
 		 scriptptr);
-	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
-		       head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
-	/* note: if dcb entries have been merged, index may be misleading */
-	NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
-	parse_init_table(bios, scriptptr, &iexec);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, head ? NV_CIO_CRE_44_HEADB :
+					         NV_CIO_CRE_44_HEADA);
+	nouveau_bios_run_init_table(dev, scriptptr, dcbent, head);
 
 	nv04_dfp_bind_head(dev, dcbent, head, dl);
 }
 
-static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
+static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
 	uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
 
 	if (!bios->fp.xlated_entry || !sub || !scriptofs)
@@ -3808,7 +135,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
 	return 0;
 }
 
-static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+static int run_lvds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
 {
 	/*
 	 * The BIT LVDS table's header has the information to setup the
@@ -3820,8 +147,8 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
 	 * conf byte. These tables are similar to the TMDS tables, consisting
 	 * of a list of pxclks and script pointers.
 	 */
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
 	uint16_t scriptptr = 0, clktable;
 
@@ -3866,14 +193,14 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
 
 		clktable = ROM16(bios->data[clktable]);
 		if (!clktable) {
-			NV_ERROR(dev, "Pixel clock comparison table not found\n");
+			NV_ERROR(drm, "Pixel clock comparison table not found\n");
 			return -ENOENT;
 		}
 		scriptptr = clkcmptable(bios, clktable, pxclk);
 	}
 
 	if (!scriptptr) {
-		NV_ERROR(dev, "LVDS output init script not found\n");
+		NV_ERROR(drm, "LVDS output init script not found\n");
 		return -ENOENT;
 	}
 	run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
@@ -3881,7 +208,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
 	return 0;
 }
 
-int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
 {
 	/*
 	 * LVDS operations are multiplexed in an effort to present a single API
@@ -3889,8 +216,9 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
 	 * This acts as the demux
 	 */
 
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nvbios *bios = &drm->vbios;
 	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
 	uint32_t sel_clk_binding, sel_clk;
 	int ret;
@@ -3909,10 +237,10 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
 	if (script == LVDS_RESET && bios->fp.power_off_for_reset)
 		call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
 
-	NV_TRACE(dev, "Calling LVDS script %d:\n", script);
+	NV_INFO(drm, "Calling LVDS script %d:\n", script);
 
 	/* don't let script change pll->head binding */
-	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+	sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
 
 	if (lvds_ver < 0x30)
 		ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
@@ -3924,7 +252,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
 	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
 	/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
-	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+	nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 	return ret;
 }
@@ -3942,12 +270,13 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 	 * the maximum number of records that can be held in the table.
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t lvds_ver, headerlen, recordlen;
 
 	memset(lth, 0, sizeof(struct lvdstableheader));
 
 	if (bios->fp.lvdsmanufacturerpointer == 0x0) {
-		NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
+		NV_ERROR(drm, "Pointer to LVDS manufacturer table invalid\n");
 		return -EINVAL;
 	}
 
@@ -3961,7 +290,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 	case 0x30:	/* NV4x */
 		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
 		if (headerlen < 0x1f) {
-			NV_ERROR(dev, "LVDS table header not understood\n");
+			NV_ERROR(drm, "LVDS table header not understood\n");
 			return -EINVAL;
 		}
 		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
@@ -3969,13 +298,13 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 	case 0x40:	/* G80/G90 */
 		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
 		if (headerlen < 0x7) {
-			NV_ERROR(dev, "LVDS table header not understood\n");
+			NV_ERROR(drm, "LVDS table header not understood\n");
 			return -EINVAL;
 		}
 		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
 		break;
 	default:
-		NV_ERROR(dev,
+		NV_ERROR(drm,
 			 "LVDS table revision %d.%d not currently supported\n",
 			 lvds_ver >> 4, lvds_ver & 0xf);
 		return -ENOSYS;
@@ -3991,7 +320,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 static int
 get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 
 	/*
 	 * The fp strap is normally dictated by the "User Strap" in
@@ -4005,14 +334,15 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
 		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
 
-	if (dev_priv->card_type >= NV_50)
-		return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
+	if (device->card_type >= NV_50)
+		return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
 	else
-		return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
+		return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
 }
 
 static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t *fptable;
 	uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
 	int ret, ofs, fpstrapping;
@@ -4022,7 +352,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 		/* Apple cards don't have the fp table; the laptops use DDC */
 		/* The table is also missing on some x86 IGPs */
 #ifndef __powerpc__
-		NV_ERROR(dev, "Pointer to flat panel table invalid\n");
+		NV_ERROR(drm, "Pointer to flat panel table invalid\n");
 #endif
 		bios->digital_min_front_porch = 0x4b;
 		return 0;
@@ -4061,7 +391,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 		ofs = -7;
 		break;
 	default:
-		NV_ERROR(dev,
+		NV_ERROR(drm,
 			 "FP table revision %d.%d not currently supported\n",
 			 fptable_ver >> 4, fptable_ver & 0xf);
 		return -ENOSYS;
@@ -4080,7 +410,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 		bios->fp.xlatwidth = lth.recordlen;
 	}
 	if (bios->fp.fpxlatetableptr == 0x0) {
-		NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
+		NV_ERROR(drm, "Pointer to flat panel xlat table invalid\n");
 		return -EINVAL;
 	}
 
@@ -4090,7 +420,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 					fpstrapping * bios->fp.xlatwidth];
 
 	if (fpindex > fpentries) {
-		NV_ERROR(dev, "Bad flat panel table index\n");
+		NV_ERROR(drm, "Bad flat panel table index\n");
 		return -ENOENT;
 	}
 
@@ -4109,7 +439,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 	bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
 			    recordlen * fpindex + ofs;
 
-	NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
+	NV_INFO(drm, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
 		 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
 		 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
 		 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
@@ -4119,8 +449,8 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 
 bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
 
 	if (!mode)	/* just checking whether we can produce a mode */
@@ -4190,8 +520,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
 	 * requiring tests against the native-mode pixel clock, cannot be done
 	 * until later, when this function should be called with non-zero pxclk
 	 */
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
 	struct lvdstableheader lth;
 	uint16_t lvdsofs;
@@ -4252,7 +582,7 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
 		lvdsmanufacturerindex = fpstrapping;
 		break;
 	default:
-		NV_ERROR(dev, "LVDS table revision not currently supported\n");
+		NV_ERROR(drm, "LVDS table revision not currently supported\n");
 		return -ENOSYS;
 	}
 
@@ -4300,7 +630,7 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
  * This function returns true if a particular DCB entry matches.
  */
 bool
-bios_encoder_match(struct dcb_entry *dcb, u32 hash)
+bios_encoder_match(struct dcb_output *dcb, u32 hash)
 {
 	if ((hash & 0x000000f0) != (dcb->location << 4))
 		return false;
@@ -4310,9 +640,9 @@ bios_encoder_match(struct dcb_entry *dcb, u32 hash)
 		return false;
 
 	switch (dcb->type) {
-	case OUTPUT_TMDS:
-	case OUTPUT_LVDS:
-	case OUTPUT_DP:
+	case DCB_OUTPUT_TMDS:
+	case DCB_OUTPUT_LVDS:
+	case DCB_OUTPUT_DP:
 		if (hash & 0x00c00000) {
 			if (!(hash & (dcb->sorconf.link << 22)))
 				return false;
@@ -4324,7 +654,7 @@ bios_encoder_match(struct dcb_entry *dcb, u32 hash)
 
 int
 nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
-			       struct dcb_entry *dcbent, int crtc)
+			       struct dcb_output *dcbent, int crtc)
 {
 	/*
 	 * The display script table is located by the BIT 'U' table.
@@ -4349,15 +679,15 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	 * offset + 5   (16 bits): pointer to first output script table
 	 */
 
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	uint8_t *table = &bios->data[bios->display.script_table_ptr];
 	uint8_t *otable = NULL;
 	uint16_t script;
 	int i;
 
 	if (!bios->display.script_table_ptr) {
-		NV_ERROR(dev, "No pointer to output script table\n");
+		NV_ERROR(drm, "No pointer to output script table\n");
 		return 1;
 	}
 
@@ -4369,7 +699,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		return 1;
 
 	if (table[0] != 0x20 && table[0] != 0x21) {
-		NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
+		NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
 			 table[0]);
 		return 1;
 	}
@@ -4404,7 +734,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	 * script tables is a pointer to the script to execute.
 	 */
 
-	NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
+	NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
 			dcbent->type, dcbent->location, dcbent->or);
 	for (i = 0; i < table[3]; i++) {
 		otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
@@ -4413,7 +743,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	}
 
 	if (!otable) {
-		NV_DEBUG_KMS(dev, "failed to match any output table\n");
+		NV_DEBUG(drm, "failed to match any output table\n");
 		return 1;
 	}
 
@@ -4425,7 +755,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		}
 
 		if (i == otable[5]) {
-			NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
+			NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
 				      "using first\n",
 				 type, dcbent->type, dcbent->or);
 			i = 0;
@@ -4435,21 +765,21 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	if (pclk == 0) {
 		script = ROM16(otable[6]);
 		if (!script) {
-			NV_DEBUG_KMS(dev, "output script 0 not found\n");
+			NV_DEBUG(drm, "output script 0 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
 	if (pclk == -1) {
 		script = ROM16(otable[8]);
 		if (!script) {
-			NV_DEBUG_KMS(dev, "output script 1 not found\n");
+			NV_DEBUG(drm, "output script 1 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
 	if (pclk == -2) {
@@ -4458,11 +788,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		else
 			script = 0;
 		if (!script) {
-			NV_DEBUG_KMS(dev, "output script 2 not found\n");
+			NV_DEBUG(drm, "output script 2 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
 	if (pclk > 0) {
@@ -4470,11 +800,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		if (script)
 			script = clkcmptable(bios, script, pclk);
 		if (!script) {
-			NV_DEBUG_KMS(dev, "clock script 0 not found\n");
+			NV_DEBUG(drm, "clock script 0 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
 	if (pclk < 0) {
@@ -4482,11 +812,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		if (script)
 			script = clkcmptable(bios, script, -pclk);
 		if (!script) {
-			NV_DEBUG_KMS(dev, "clock script 1 not found\n");
+			NV_DEBUG(drm, "clock script 1 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	}
 
@@ -4494,7 +824,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 }
 
 
-int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
+int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
 {
 	/*
 	 * the pxclk parameter is in kHz
@@ -4505,8 +835,9 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
 	 * ffs(or) == 3, use the second.
 	 */
 
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nvbios *bios = &drm->vbios;
 	int cv = bios->chip_version;
 	uint16_t clktable = 0, scriptptr;
 	uint32_t sel_clk_binding, sel_clk;
@@ -4527,19 +858,19 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
 	}
 
 	if (!clktable) {
-		NV_ERROR(dev, "Pixel clock comparison table not found\n");
+		NV_ERROR(drm, "Pixel clock comparison table not found\n");
 		return -EINVAL;
 	}
 
 	scriptptr = clkcmptable(bios, clktable, pxclk);
 
 	if (!scriptptr) {
-		NV_ERROR(dev, "TMDS output init script not found\n");
+		NV_ERROR(drm, "TMDS output init script not found\n");
 		return -ENOENT;
 	}
 
 	/* don't let script change pll->head binding */
-	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+	sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
 	run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
 	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
@@ -4547,447 +878,6 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
 	return 0;
 }
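
run_lvds_table() and run_tmds_table() both select their output script through clkcmptable(), whose body is elided by the hunk above; conceptually it walks fixed-size records of (pixel-clock threshold, script pointer) and returns the first script whose threshold the requested clock meets. The sketch below assumes a simplified 4-byte record (16-bit clock in 10 kHz units followed by a 16-bit script pointer, zero-terminated); the real layout varies with the BIOS major version, so this is an illustration, not the driver's exact format.

/* Illustrative pixel-clock comparison table lookup; simplified record layout. */
#include <stdint.h>

static inline uint16_t rom16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);	/* VBIOS data is little-endian */
}

static uint16_t clk_cmp_lookup(const uint8_t *data, uint16_t table, int pxclk)
{
	int i = 0;

	for (;;) {
		uint16_t clk = rom16(&data[table + 4 * i]);

		if (!clk)
			return 0;		/* end of table, no match */
		if (pxclk >= clk * 10)		/* first threshold met by pxclk */
			return rom16(&data[table + 4 * i + 2]);
		i++;
	}
}

The driver's clkcmptable() applies the same idea but derives the record size and how the script pointer is resolved from the BIOS version.
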
 
-struct pll_mapping {
-	u8  type;
-	u32 reg;
-};
-
-static struct pll_mapping nv04_pll_mapping[] = {
-	{ PLL_CORE  , NV_PRAMDAC_NVPLL_COEFF },
-	{ PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
-	{ PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
-	{ PLL_VPLL1 , NV_RAMDAC_VPLL2 },
-	{}
-};
-
-static struct pll_mapping nv40_pll_mapping[] = {
-	{ PLL_CORE  , 0x004000 },
-	{ PLL_MEMORY, 0x004020 },
-	{ PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
-	{ PLL_VPLL1 , NV_RAMDAC_VPLL2 },
-	{}
-};
-
-static struct pll_mapping nv50_pll_mapping[] = {
-	{ PLL_CORE  , 0x004028 },
-	{ PLL_SHADER, 0x004020 },
-	{ PLL_UNK03 , 0x004000 },
-	{ PLL_MEMORY, 0x004008 },
-	{ PLL_UNK40 , 0x00e810 },
-	{ PLL_UNK41 , 0x00e818 },
-	{ PLL_UNK42 , 0x00e824 },
-	{ PLL_VPLL0 , 0x614100 },
-	{ PLL_VPLL1 , 0x614900 },
-	{}
-};
-
-static struct pll_mapping nv84_pll_mapping[] = {
-	{ PLL_CORE  , 0x004028 },
-	{ PLL_SHADER, 0x004020 },
-	{ PLL_MEMORY, 0x004008 },
-	{ PLL_VDEC  , 0x004030 },
-	{ PLL_UNK41 , 0x00e818 },
-	{ PLL_VPLL0 , 0x614100 },
-	{ PLL_VPLL1 , 0x614900 },
-	{}
-};
-
-u32
-get_pll_register(struct drm_device *dev, enum pll_types type)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct pll_mapping *map;
-	int i;
-
-	if (dev_priv->card_type < NV_40)
-		map = nv04_pll_mapping;
-	else
-	if (dev_priv->card_type < NV_50)
-		map = nv40_pll_mapping;
-	else {
-		u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
-
-		if (plim[0] >= 0x30) {
-			u8 *entry = plim + plim[1];
-			for (i = 0; i < plim[3]; i++, entry += plim[2]) {
-				if (entry[0] == type)
-					return ROM32(entry[3]);
-			}
-
-			return 0;
-		}
-
-		if (dev_priv->chipset == 0x50)
-			map = nv50_pll_mapping;
-		else
-			map = nv84_pll_mapping;
-	}
-
-	while (map->reg) {
-		if (map->type == type)
-			return map->reg;
-		map++;
-	}
-
-	return 0;
-}
-
-int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
-{
-	/*
-	 * PLL limits table
-	 *
-	 * Version 0x10: NV30, NV31
-	 * One byte header (version), one record of 24 bytes
-	 * Version 0x11: NV36 - Not implemented
-	 * Seems to have same record style as 0x10, but 3 records rather than 1
-	 * Version 0x20: Found on Geforce 6 cards
-	 * Trivial 4 byte BIT header. 31 (0x1f) byte record length
-	 * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
-	 * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
-	 * length in general, some (integrated) have an extra configuration byte
-	 * Version 0x30: Found on Geforce 8, separates the register mapping
-	 * from the limits tables.
-	 */
-
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	int cv = bios->chip_version, pllindex = 0;
-	uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
-	uint32_t crystal_strap_mask, crystal_straps;
-
-	if (!bios->pll_limit_tbl_ptr) {
-		if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
-		    cv >= 0x40) {
-			NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
-			return -EINVAL;
-		}
-	} else
-		pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
-
-	crystal_strap_mask = 1 << 6;
-	/* open coded dev->twoHeads test */
-	if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
-		crystal_strap_mask |= 1 << 22;
-	crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
-							crystal_strap_mask;
-
-	switch (pll_lim_ver) {
-	/*
-	 * We use version 0 to indicate a pre limit table bios (single stage
-	 * pll) and load the hard coded limits instead.
-	 */
-	case 0:
-		break;
-	case 0x10:
-	case 0x11:
-		/*
-		 * Strictly v0x11 has 3 entries, but the last two don't seem
-		 * to get used.
-		 */
-		headerlen = 1;
-		recordlen = 0x18;
-		entries = 1;
-		pllindex = 0;
-		break;
-	case 0x20:
-	case 0x21:
-	case 0x30:
-	case 0x40:
-		headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
-		recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
-		entries = bios->data[bios->pll_limit_tbl_ptr + 3];
-		break;
-	default:
-		NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
-				"supported\n", pll_lim_ver);
-		return -ENOSYS;
-	}
-
-	/* initialize all members to zero */
-	memset(pll_lim, 0, sizeof(struct pll_lims));
-
-	/* if we were passed a type rather than a register, figure
-	 * out the register and store it
-	 */
-	if (limit_match > PLL_MAX)
-		pll_lim->reg = limit_match;
-	else {
-		pll_lim->reg = get_pll_register(dev, limit_match);
-		if (!pll_lim->reg)
-			return -ENOENT;
-	}
-
-	if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
-		uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
-
-		pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
-		pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
-		pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
-		pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
-		pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
-		pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
-		pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
-
-		/* these values taken from nv30/31/36 */
-		pll_lim->vco1.min_n = 0x1;
-		if (cv == 0x36)
-			pll_lim->vco1.min_n = 0x5;
-		pll_lim->vco1.max_n = 0xff;
-		pll_lim->vco1.min_m = 0x1;
-		pll_lim->vco1.max_m = 0xd;
-		pll_lim->vco2.min_n = 0x4;
-		/*
-		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
-		 * table version (apart from nv35)), N2 is compared to
-		 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
-		 * save a comparison
-		 */
-		pll_lim->vco2.max_n = 0x28;
-		if (cv == 0x30 || cv == 0x35)
-			/* only 5 bits available for N2 on nv30/35 */
-			pll_lim->vco2.max_n = 0x1f;
-		pll_lim->vco2.min_m = 0x1;
-		pll_lim->vco2.max_m = 0x4;
-		pll_lim->max_log2p = 0x7;
-		pll_lim->max_usable_log2p = 0x6;
-	} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
-		uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
-		uint8_t *pll_rec;
-		int i;
-
-		/*
-		 * The first entry is the default match if nothing better is
-		 * found; warn if its reg field is nonzero.
-		 */
-		if (ROM32(bios->data[plloffs]))
-			NV_WARN(dev, "Default PLL limit entry has non-zero "
-				       "register field\n");
-
-		for (i = 1; i < entries; i++)
-			if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
-				pllindex = i;
-				break;
-			}
-
-		if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
-			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				 "limits table", pll_lim->reg);
-			return -ENOENT;
-		}
-
-		pll_rec = &bios->data[plloffs + recordlen * pllindex];
-
-		BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
-			pllindex ? pll_lim->reg : 0);
-
-		/*
-		 * Frequencies are stored in the tables in MHz; kHz are more
-		 * useful, so we convert.
-		 */
-
-		/* What output frequencies can each VCO generate? */
-		pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
-		pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
-		pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
-		pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
-
-		/* What input frequencies do they accept (past the m-divider)? */
-		pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
-		pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
-		pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
-		pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
-
-		/* What values are accepted as multiplier and divider? */
-		pll_lim->vco1.min_n = pll_rec[20];
-		pll_lim->vco1.max_n = pll_rec[21];
-		pll_lim->vco1.min_m = pll_rec[22];
-		pll_lim->vco1.max_m = pll_rec[23];
-		pll_lim->vco2.min_n = pll_rec[24];
-		pll_lim->vco2.max_n = pll_rec[25];
-		pll_lim->vco2.min_m = pll_rec[26];
-		pll_lim->vco2.max_m = pll_rec[27];
-
-		pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
-		if (pll_lim->max_log2p > 0x7)
-			/* pll decoding in nv_hw.c assumes never > 7 */
-			NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
-				pll_lim->max_log2p);
-		if (cv < 0x60)
-			pll_lim->max_usable_log2p = 0x6;
-		pll_lim->log2p_bias = pll_rec[30];
-
-		if (recordlen > 0x22)
-			pll_lim->refclk = ROM32(pll_rec[31]);
-
-		if (recordlen > 0x23 && pll_rec[35])
-			NV_WARN(dev,
-				"Bits set in PLL configuration byte (%x)\n",
-				pll_rec[35]);
-
-		/* C51 special not seen elsewhere */
-		if (cv == 0x51 && !pll_lim->refclk) {
-			uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
-
-			if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
-			    (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
-				if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
-					pll_lim->refclk = 200000;
-				else
-					pll_lim->refclk = 25000;
-			}
-		}
-	} else if (pll_lim_ver == 0x30) { /* ver 0x30 */
-		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
-		uint8_t *record = NULL;
-		int i;
-
-		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-			pll_lim->reg);
-
-		for (i = 0; i < entries; i++, entry += recordlen) {
-			if (ROM32(entry[3]) == pll_lim->reg) {
-				record = &bios->data[ROM16(entry[1])];
-				break;
-			}
-		}
-
-		if (!record) {
-			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				 "limits table", pll_lim->reg);
-			return -ENOENT;
-		}
-
-		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
-		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
-		pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
-		pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
-		pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
-		pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
-		pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
-		pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
-		pll_lim->vco1.min_n = record[16];
-		pll_lim->vco1.max_n = record[17];
-		pll_lim->vco1.min_m = record[18];
-		pll_lim->vco1.max_m = record[19];
-		pll_lim->vco2.min_n = record[20];
-		pll_lim->vco2.max_n = record[21];
-		pll_lim->vco2.min_m = record[22];
-		pll_lim->vco2.max_m = record[23];
-		pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
-		pll_lim->log2p_bias = record[27];
-		pll_lim->refclk = ROM32(record[28]);
-	} else if (pll_lim_ver) { /* ver 0x40 */
-		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
-		uint8_t *record = NULL;
-		int i;
-
-		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-			pll_lim->reg);
-
-		for (i = 0; i < entries; i++, entry += recordlen) {
-			if (ROM32(entry[3]) == pll_lim->reg) {
-				record = &bios->data[ROM16(entry[1])];
-				break;
-			}
-		}
-
-		if (!record) {
-			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				 "limits table", pll_lim->reg);
-			return -ENOENT;
-		}
-
-		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
-		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
-		pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
-		pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
-		pll_lim->vco1.min_m = record[8];
-		pll_lim->vco1.max_m = record[9];
-		pll_lim->vco1.min_n = record[10];
-		pll_lim->vco1.max_n = record[11];
-		pll_lim->min_p = record[12];
-		pll_lim->max_p = record[13];
-		pll_lim->refclk = ROM16(entry[9]) * 1000;
-	}
-
-	/*
-	 * By now any valid limit table ought to have set a max frequency for
-	 * vco1, so if it's zero it's either a pre limit table bios, or one
-	 * with an empty limit table (seen on nv18)
-	 */
-	if (!pll_lim->vco1.maxfreq) {
-		pll_lim->vco1.minfreq = bios->fminvco;
-		pll_lim->vco1.maxfreq = bios->fmaxvco;
-		pll_lim->vco1.min_inputfreq = 0;
-		pll_lim->vco1.max_inputfreq = INT_MAX;
-		pll_lim->vco1.min_n = 0x1;
-		pll_lim->vco1.max_n = 0xff;
-		pll_lim->vco1.min_m = 0x1;
-		if (crystal_straps == 0) {
-			/* nv05 does this, nv11 doesn't, nv10 unknown */
-			if (cv < 0x11)
-				pll_lim->vco1.min_m = 0x7;
-			pll_lim->vco1.max_m = 0xd;
-		} else {
-			if (cv < 0x11)
-				pll_lim->vco1.min_m = 0x8;
-			pll_lim->vco1.max_m = 0xe;
-		}
-		if (cv < 0x17 || cv == 0x1a || cv == 0x20)
-			pll_lim->max_log2p = 4;
-		else
-			pll_lim->max_log2p = 5;
-		pll_lim->max_usable_log2p = pll_lim->max_log2p;
-	}
-
-	if (!pll_lim->refclk)
-		switch (crystal_straps) {
-		case 0:
-			pll_lim->refclk = 13500;
-			break;
-		case (1 << 6):
-			pll_lim->refclk = 14318;
-			break;
-		case (1 << 22):
-			pll_lim->refclk = 27000;
-			break;
-		case (1 << 22 | 1 << 6):
-			pll_lim->refclk = 25000;
-			break;
-		}
-
-	NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
-	NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
-	NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
-	NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
-	NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
-	NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
-	NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
-	NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
-	if (pll_lim->vco2.maxfreq) {
-		NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
-		NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
-		NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
-		NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
-		NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
-		NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
-		NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
-		NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
-	}
-	if (!pll_lim->max_p) {
-		NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
-		NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
-	} else {
-		NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
-		NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
-	}
-	NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
-
-	return 0;
-}
-
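
For orientation, the limits that get_pll_limits() fills in are checked against coefficients chosen for the conventional single-stage relationship clk = refclk * N / M >> log2P (two-stage VCOs chain a second N2/M2 pair before the post-divider). The helper below is a hypothetical sketch under that assumption, not driver code, and struct vco_lim is a stand-in for the vco fields of struct pll_lims.

/* Hypothetical sketch: single-stage PLL output and VCO range check. */
#include <stdbool.h>
#include <stdint.h>

struct vco_lim { uint32_t minfreq, maxfreq; };	/* kHz, as filled by get_pll_limits() */

/* e.g. refclk=13500 kHz, N=100, M=4, log2P=1 -> VCO 337500 kHz, clk 168750 kHz */
static uint32_t pll_clk_khz(uint32_t refclk_khz, uint8_t n, uint8_t m, uint8_t log2p)
{
	return (refclk_khz * n / m) >> log2p;
}

static bool pll_vco_in_range(const struct vco_lim *vco1, uint32_t refclk_khz,
			     uint8_t n, uint8_t m)
{
	uint32_t vco = refclk_khz * n / m;	/* VCO frequency before the post-divider */

	return vco >= vco1->minfreq && vco <= vco1->maxfreq;
}
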
 static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
 {
 	/*
@@ -4996,10 +886,11 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
 	 * offset + 2  (8 bits): Chip version
 	 * offset + 3  (8 bits): Major version
 	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	bios->major_version = bios->data[offset + 3];
 	bios->chip_version = bios->data[offset + 2];
-	NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
+	NV_INFO(drm, "Bios version %02x.%02x.%02x.%02x\n",
 		 bios->data[offset + 3], bios->data[offset + 2],
 		 bios->data[offset + 1], bios->data[offset]);
 }
@@ -5035,25 +926,26 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	 * offset + 0 (16 bits): loadval table pointer
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint16_t load_table_ptr;
 	uint8_t version, headerlen, entrylen, num_entries;
 
 	if (bitentry->length != 3) {
-		NV_ERROR(dev, "Do not understand BIT A table\n");
+		NV_ERROR(drm, "Do not understand BIT A table\n");
 		return -EINVAL;
 	}
 
 	load_table_ptr = ROM16(bios->data[bitentry->offset]);
 
 	if (load_table_ptr == 0x0) {
-		NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n");
+		NV_DEBUG(drm, "Pointer to BIT loadval table invalid\n");
 		return -EINVAL;
 	}
 
 	version = bios->data[load_table_ptr];
 
 	if (version != 0x10) {
-		NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
+		NV_ERROR(drm, "BIT loadval table version %d.%d not supported\n",
 			 version >> 4, version & 0xF);
 		return -ENOSYS;
 	}
@@ -5063,7 +955,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	num_entries = bios->data[load_table_ptr + 3];
 
 	if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
-		NV_ERROR(dev, "Do not understand BIT loadval table\n");
+		NV_ERROR(drm, "Do not understand BIT loadval table\n");
 		return -EINVAL;
 	}
 
@@ -5080,9 +972,10 @@ static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	 *
 	 * There's more in here, but that's unknown.
 	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (bitentry->length < 10) {
-		NV_ERROR(dev, "Do not understand BIT C table\n");
+		NV_ERROR(drm, "Do not understand BIT C table\n");
 		return -EINVAL;
 	}
 
@@ -5101,9 +994,10 @@ static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bi
 	 * records beginning with a freq.
 	 * offset + 2  (16 bits): mode table pointer
 	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (bitentry->length != 4) {
-		NV_ERROR(dev, "Do not understand BIT display table\n");
+		NV_ERROR(drm, "Do not understand BIT display table\n");
 		return -EINVAL;
 	}
 
@@ -5119,9 +1013,10 @@ static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	 *
 	 * See parse_script_table_pointers for layout
 	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (bitentry->length < 14) {
-		NV_ERROR(dev, "Do not understand init table\n");
+		NV_ERROR(drm, "Do not understand init table\n");
 		return -EINVAL;
 	}
 
@@ -5148,11 +1043,12 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	 * There's other things in the table, purpose unknown
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint16_t daccmpoffset;
 	uint8_t dacver, dacheaderlen;
 
 	if (bitentry->length < 6) {
-		NV_ERROR(dev, "BIT i table too short for needed information\n");
+		NV_ERROR(drm, "BIT i table too short for needed information\n");
 		return -EINVAL;
 	}
 
@@ -5166,7 +1062,7 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
 
 	if (bitentry->length < 15) {
-		NV_WARN(dev, "BIT i table not long enough for DAC load "
+		NV_WARN(drm, "BIT i table not long enough for DAC load "
 			       "detection comparison table\n");
 		return -EINVAL;
 	}
@@ -5187,7 +1083,7 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	dacheaderlen = bios->data[daccmpoffset + 1];
 
 	if (dacver != 0x00 && dacver != 0x10) {
-		NV_WARN(dev, "DAC load detection comparison table version "
+		NV_WARN(drm, "DAC load detection comparison table version "
 			       "%d.%d not known\n", dacver >> 4, dacver & 0xf);
 		return -ENOSYS;
 	}
@@ -5207,8 +1103,10 @@ static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	 * offset + 0  (16 bits): LVDS strap xlate table pointer
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	if (bitentry->length != 2) {
-		NV_ERROR(dev, "Do not understand BIT LVDS table\n");
+		NV_ERROR(drm, "Do not understand BIT LVDS table\n");
 		return -EINVAL;
 	}
 
@@ -5278,20 +1176,21 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	 * "or" from the DCB.
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint16_t tmdstableptr, script1, script2;
 
 	if (bitentry->length != 2) {
-		NV_ERROR(dev, "Do not understand BIT TMDS table\n");
+		NV_ERROR(drm, "Do not understand BIT TMDS table\n");
 		return -EINVAL;
 	}
 
 	tmdstableptr = ROM16(bios->data[bitentry->offset]);
 	if (!tmdstableptr) {
-		NV_ERROR(dev, "Pointer to TMDS table invalid\n");
+		NV_ERROR(drm, "Pointer to TMDS table invalid\n");
 		return -EINVAL;
 	}
 
-	NV_INFO(dev, "TMDS table version %d.%d\n",
+	NV_INFO(drm, "TMDS table version %d.%d\n",
 		bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
 
 	/* nv50+ has v2.0, but we don't parse it atm */
@@ -5305,7 +1204,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	script1 = ROM16(bios->data[tmdstableptr + 7]);
 	script2 = ROM16(bios->data[tmdstableptr + 9]);
 	if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
-		NV_WARN(dev, "TMDS table script pointers not stubbed\n");
+		NV_WARN(drm, "TMDS table script pointers not stubbed\n");
 
 	bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
 	bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
@@ -5325,10 +1224,11 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	 * offset + 0  (16 bits): output script table pointer
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint16_t outputscripttableptr;
 
 	if (bitentry->length != 3) {
-		NV_ERROR(dev, "Do not understand BIT U table\n");
+		NV_ERROR(drm, "Do not understand BIT U table\n");
 		return -EINVAL;
 	}
 
@@ -5347,8 +1247,8 @@ struct bit_table {
 int
 bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	u8 entries, *entry;
 
 	if (bios->type != NVBIOS_BIT)
@@ -5377,12 +1277,13 @@ parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
 		struct bit_table *table)
 {
 	struct drm_device *dev = bios->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct bit_entry bitentry;
 
 	if (bit_table(dev, table->id, &bitentry) == 0)
 		return table->parse_fn(dev, bios, &bitentry);
 
-	NV_INFO(dev, "BIT table '%c' not found\n", table->id);
+	NV_INFO(drm, "BIT table '%c' not found\n", table->id);
 	return -ENOSYS;
 }
 
@@ -5462,6 +1363,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 	 * offset + 156: minimum pixel clock for LVDS dual link
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
 	uint16_t bmplength;
 	uint16_t legacy_scripts_offset, legacy_i2c_offset;
@@ -5475,7 +1377,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 	bmp_version_major = bmp[5];
 	bmp_version_minor = bmp[6];
 
-	NV_TRACE(dev, "BMP version %d.%d\n",
+	NV_INFO(drm, "BMP version %d.%d\n",
 		 bmp_version_major, bmp_version_minor);
 
 	/*
@@ -5491,7 +1393,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 	 * happened instead.
 	 */
 	if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
-		NV_ERROR(dev, "You have an unsupported BMP version. "
+		NV_ERROR(drm, "You have an unsupported BMP version. "
 				"Please send in your bios\n");
 		return -ENOSYS;
 	}
@@ -5540,7 +1442,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 
 	/* checksum */
 	if (nv_cksum(bmp, 8)) {
-		NV_ERROR(dev, "Bad BMP checksum\n");
+		NV_ERROR(drm, "Bad BMP checksum\n");
 		return -EINVAL;
 	}
 
@@ -5625,20 +1527,20 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
 }
 
 void *
-dcb_table(struct drm_device *dev)
+olddcb_table(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 *dcb = NULL;
 
-	if (dev_priv->card_type > NV_04)
-		dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
+	if (nv_device(drm->device)->card_type > NV_04)
+		dcb = ROMPTR(dev, drm->vbios.data[0x36]);
 	if (!dcb) {
-		NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+		NV_WARN(drm, "No DCB data found in VBIOS\n");
 		return NULL;
 	}
 
 	if (dcb[0] >= 0x41) {
-		NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
+		NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
 		return NULL;
 	} else
 	if (dcb[0] >= 0x30) {
@@ -5670,18 +1572,18 @@ dcb_table(struct drm_device *dev)
 		 *
 		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
 		 */
-		NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
+		NV_WARN(drm, "No useful DCB data in VBIOS\n");
 		return NULL;
 	}
 
-	NV_WARNONCE(dev, "DCB header validation failed\n");
+	NV_WARN(drm, "DCB header validation failed\n");
 	return NULL;
 }
 
 void *
-dcb_outp(struct drm_device *dev, u8 idx)
+olddcb_outp(struct drm_device *dev, u8 idx)
 {
-	u8 *dcb = dcb_table(dev);
+	u8 *dcb = olddcb_table(dev);
 	if (dcb && dcb[0] >= 0x30) {
 		if (idx < dcb[2])
 			return dcb + dcb[1] + (idx * dcb[3]);
@@ -5703,20 +1605,20 @@ dcb_outp(struct drm_device *dev, u8 idx)
 }
 
 int
-dcb_outp_foreach(struct drm_device *dev, void *data,
+olddcb_outp_foreach(struct drm_device *dev, void *data,
 		 int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
 {
 	int ret, idx = -1;
 	u8 *outp = NULL;
-	while ((outp = dcb_outp(dev, ++idx))) {
+	while ((outp = olddcb_outp(dev, ++idx))) {
 		if (ROM32(outp[0]) == 0x00000000)
 			break; /* seen on an NV11 with DCB v1.5 */
 		if (ROM32(outp[0]) == 0xffffffff)
 			break; /* seen on an NV17 with DCB v2.0 */
 
-		if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
+		if ((outp[0] & 0x0f) == DCB_OUTPUT_UNUSED)
 			continue;
-		if ((outp[0] & 0x0f) == OUTPUT_EOL)
+		if ((outp[0] & 0x0f) == DCB_OUTPUT_EOL)
 			break;
 
 		ret = exec(dev, data, idx, outp);
@@ -5728,9 +1630,9 @@ dcb_outp_foreach(struct drm_device *dev, void *data,
 }
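
A hypothetical caller of the renamed iterator, purely for illustration: it relies on the driver's own headers, uses the type nibble tested in the loop above, and assumes (as the loop suggests) that a non-zero callback return aborts the walk. It is not part of this patch.

/* Hypothetical usage sketch: count raw DCB entries with a TMDS type nibble. */
static int count_tmds_cb(struct drm_device *dev, void *data, int idx, u8 *outp)
{
	int *count = data;

	if ((outp[0] & 0x0f) == DCB_OUTPUT_TMDS)
		(*count)++;
	return 0;	/* keep iterating */
}

static int count_tmds_outputs(struct drm_device *dev)
{
	int count = 0;

	olddcb_outp_foreach(dev, &count, count_tmds_cb);
	return count;
}
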
 
 u8 *
-dcb_conntab(struct drm_device *dev)
+olddcb_conntab(struct drm_device *dev)
 {
-	u8 *dcb = dcb_table(dev);
+	u8 *dcb = olddcb_table(dev);
 	if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
 		u8 *conntab = ROMPTR(dev, dcb[0x14]);
 		if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
@@ -5740,19 +1642,19 @@ dcb_conntab(struct drm_device *dev)
 }
 
 u8 *
-dcb_conn(struct drm_device *dev, u8 idx)
+olddcb_conn(struct drm_device *dev, u8 idx)
 {
-	u8 *conntab = dcb_conntab(dev);
+	u8 *conntab = olddcb_conntab(dev);
 	if (conntab && idx < conntab[2])
 		return conntab + conntab[1] + (idx * conntab[3]);
 	return NULL;
 }
 
-static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
+static struct dcb_output *new_dcb_entry(struct dcb_table *dcb)
 {
-	struct dcb_entry *entry = &dcb->entry[dcb->entries];
+	struct dcb_output *entry = &dcb->entry[dcb->entries];
 
-	memset(entry, 0, sizeof(struct dcb_entry));
+	memset(entry, 0, sizeof(struct dcb_output));
 	entry->index = dcb->entries++;
 
 	return entry;
@@ -5761,20 +1663,22 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
 static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
 				 int heads, int or)
 {
-	struct dcb_entry *entry = new_dcb_entry(dcb);
+	struct dcb_output *entry = new_dcb_entry(dcb);
 
 	entry->type = type;
 	entry->i2c_index = i2c;
 	entry->heads = heads;
-	if (type != OUTPUT_ANALOG)
+	if (type != DCB_OUTPUT_ANALOG)
 		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
 	entry->or = or;
 }
 
 static bool
 parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
-		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+		  uint32_t conn, uint32_t conf, struct dcb_output *entry)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	entry->type = conn & 0xf;
 	entry->i2c_index = (conn >> 4) & 0xf;
 	entry->heads = (conn >> 8) & 0xf;
@@ -5784,7 +1688,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 	entry->or = (conn >> 24) & 0xf;
 
 	switch (entry->type) {
-	case OUTPUT_ANALOG:
+	case DCB_OUTPUT_ANALOG:
 		/*
 		 * Although the rest of a CRT conf dword is usually
 		 * zeros, mac biosen have stuff there so we must mask
@@ -5793,7 +1697,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 					 (conf & 0xffff) * 10 :
 					 (conf & 0xff) * 10000;
 		break;
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		{
 		uint32_t mask;
 		if (conf & 0x1)
@@ -5828,12 +1732,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 			if (dcb->version >= 0x40)
 				break;
 
-			NV_ERROR(dev, "Unknown LVDS configuration bits, "
+			NV_ERROR(drm, "Unknown LVDS configuration bits, "
 				      "please report\n");
 		}
 		break;
 		}
-	case OUTPUT_TV:
+	case DCB_OUTPUT_TV:
 	{
 		if (dcb->version >= 0x30)
 			entry->tvconf.has_component_output = conf & (0x8 << 4);
@@ -5842,7 +1746,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 
 		break;
 	}
-	case OUTPUT_DP:
+	case DCB_OUTPUT_DP:
 		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
 		switch ((conf & 0x00e00000) >> 21) {
 		case 0:
@@ -5864,7 +1768,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 			break;
 		}
 		break;
-	case OUTPUT_TMDS:
+	case DCB_OUTPUT_TMDS:
 		if (dcb->version >= 0x40)
 			entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
 		else if (dcb->version >= 0x30)
@@ -5873,7 +1777,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 			entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
 
 		break;
-	case OUTPUT_EOL:
+	case DCB_OUTPUT_EOL:
 		/* weird g80 mobile type that "nv" treats as a terminator */
 		dcb->entries--;
 		return false;
@@ -5900,27 +1804,29 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 
 static bool
 parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
-		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+		  uint32_t conn, uint32_t conf, struct dcb_output *entry)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	switch (conn & 0x0000000f) {
 	case 0:
-		entry->type = OUTPUT_ANALOG;
+		entry->type = DCB_OUTPUT_ANALOG;
 		break;
 	case 1:
-		entry->type = OUTPUT_TV;
+		entry->type = DCB_OUTPUT_TV;
 		break;
 	case 2:
 	case 4:
 		if (conn & 0x10)
-			entry->type = OUTPUT_LVDS;
+			entry->type = DCB_OUTPUT_LVDS;
 		else
-			entry->type = OUTPUT_TMDS;
+			entry->type = DCB_OUTPUT_TMDS;
 		break;
 	case 3:
-		entry->type = OUTPUT_LVDS;
+		entry->type = DCB_OUTPUT_LVDS;
 		break;
 	default:
-		NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
+		NV_ERROR(drm, "Unknown DCB type %d\n", conn & 0x0000000f);
 		return false;
 	}
 
@@ -5932,13 +1838,13 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
 	entry->duallink_possible = false;
 
 	switch (entry->type) {
-	case OUTPUT_ANALOG:
+	case DCB_OUTPUT_ANALOG:
 		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
 		break;
-	case OUTPUT_TV:
+	case DCB_OUTPUT_TV:
 		entry->tvconf.has_component_output = false;
 		break;
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		if ((conn & 0x00003f00) >> 8 != 0x10)
 			entry->lvdsconf.use_straps_for_mode = true;
 		entry->lvdsconf.use_power_scripts = true;
@@ -5959,14 +1865,15 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 	 * more options
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i, newentries = 0;
 
 	for (i = 0; i < dcb->entries; i++) {
-		struct dcb_entry *ient = &dcb->entry[i];
+		struct dcb_output *ient = &dcb->entry[i];
 		int j;
 
 		for (j = i + 1; j < dcb->entries; j++) {
-			struct dcb_entry *jent = &dcb->entry[j];
+			struct dcb_output *jent = &dcb->entry[j];
 
 			if (jent->type == 100) /* already merged entry */
 				continue;
@@ -5976,7 +1883,7 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 			    jent->type == ient->type &&
 			    jent->location == ient->location &&
 			    jent->or == ient->or) {
-				NV_TRACE(dev, "Merging DCB entries %d and %d\n",
+				NV_INFO(drm, "Merging DCB entries %d and %d\n",
 					 i, j);
 				ient->heads |= jent->heads;
 				jent->type = 100; /* dummy value */
@@ -6002,8 +1909,8 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 static bool
 apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
 
 	/* Dell Precision M6300
 	 *   DCB entry 2: 02025312 00000010
@@ -6029,7 +1936,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 	 */
 	if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
 		if (*conn == 0xf2005014 && *conf == 0xffffffff) {
-			fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
+			fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
 			return false;
 		}
 	}
@@ -6115,24 +2022,24 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #ifdef __powerpc__
 	/* Apple iMac G4 NV17 */
 	if (of_machine_is_compatible("PowerMac4,5")) {
-		fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
-		fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
 		return;
 	}
 #endif
 
 	/* Make up some sane defaults */
-	fabricate_dcb_output(dcb, OUTPUT_ANALOG,
+	fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
 			     bios->legacy.i2c_indices.crt, 1, 1);
 
 	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-		fabricate_dcb_output(dcb, OUTPUT_TV,
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
 				     bios->legacy.i2c_indices.tv,
 				     all_heads, 0);
 
 	else if (bios->tmds.output0_script_ptr ||
 		 bios->tmds.output1_script_ptr)
-		fabricate_dcb_output(dcb, OUTPUT_TMDS,
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
 				     bios->legacy.i2c_indices.panel,
 				     all_heads, 1);
 }
@@ -6140,16 +2047,16 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 static int
 parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
 	u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
 	u32 conn = ROM32(outp[0]);
 	bool ret;
 
 	if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
-		struct dcb_entry *entry = new_dcb_entry(dcb);
+		struct dcb_output *entry = new_dcb_entry(dcb);
 
-		NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
+		NV_INFO(drm, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
 
 		if (dcb->version >= 0x20)
 			ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
@@ -6162,7 +2069,7 @@ parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
 		 * are cards with bogus values (nv31m in bug 23212),
 		 * and it's otherwise useless.
 		 */
-		if (entry->type == OUTPUT_TV &&
+		if (entry->type == DCB_OUTPUT_TV &&
 		    entry->location == DCB_LOC_ON_CHIP)
 			entry->i2c_index = 0x0f;
 	}
@@ -6210,7 +2117,7 @@ dcb_fake_connectors(struct nvbios *bios)
 	 * table - just in case it has random, rather than stub, entries.
 	 */
 	if (i > 1) {
-		u8 *conntab = dcb_conntab(bios->dev);
+		u8 *conntab = olddcb_conntab(bios->dev);
 		if (conntab)
 			conntab[0] = 0x00;
 	}
@@ -6219,11 +2126,12 @@ dcb_fake_connectors(struct nvbios *bios)
 static int
 parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct dcb_table *dcb = &bios->dcb;
 	u8 *dcbt, *conn;
 	int idx;
 
-	dcbt = dcb_table(dev);
+	dcbt = olddcb_table(dev);
 	if (!dcbt) {
 		/* handle pre-DCB boards */
 		if (bios->type == NVBIOS_BMP) {
@@ -6234,10 +2142,10 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 		return -EINVAL;
 	}
 
-	NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
+	NV_INFO(drm, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
 
 	dcb->version = dcbt[0];
-	dcb_outp_foreach(dev, NULL, parse_dcb_entry);
+	olddcb_outp_foreach(dev, NULL, parse_dcb_entry);
 
 	/*
 	 * apart for v2.1+ not being known for requiring merging, this
@@ -6251,10 +2159,10 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 
 	/* dump connector table entries to log, if any exist */
 	idx = -1;
-	while ((conn = dcb_conn(dev, ++idx))) {
+	while ((conn = olddcb_conn(dev, ++idx))) {
 		if (conn[0] != 0xff) {
-			NV_TRACE(dev, "DCB conn %02d: ", idx);
-			if (dcb_conntab(dev)[3] < 4)
+			NV_INFO(drm, "DCB conn %02d: ", idx);
+			if (olddcb_conntab(dev)[3] < 4)
 				printk("%04x\n", ROM16(conn[0]));
 			else
 				printk("%08x\n", ROM32(conn[0]));
@@ -6275,12 +2183,14 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
 	 * starting at reg 0x00001400
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	uint8_t bytes_to_write;
 	uint16_t hwsq_entry_offset;
 	int i;
 
 	if (bios->data[hwsq_offset] <= entry) {
-		NV_ERROR(dev, "Too few entries in HW sequencer table for "
+		NV_ERROR(drm, "Too few entries in HW sequencer table for "
 				"requested entry\n");
 		return -ENOENT;
 	}
@@ -6288,24 +2198,24 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
 	bytes_to_write = bios->data[hwsq_offset + 1];
 
 	if (bytes_to_write != 36) {
-		NV_ERROR(dev, "Unknown HW sequencer entry size\n");
+		NV_ERROR(drm, "Unknown HW sequencer entry size\n");
 		return -EINVAL;
 	}
 
-	NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");
+	NV_INFO(drm, "Loading NV17 power sequencing microcode\n");
 
 	hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
 
 	/* set sequencer control */
-	bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
+	nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
 	bytes_to_write -= 4;
 
 	/* write ucode */
 	for (i = 0; i < bytes_to_write; i += 4)
-		bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
+		nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
 
 	/* twiddle NV_PBUS_DEBUG_4 */
-	bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);
+	nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
 
 	return 0;
 }
@@ -6336,8 +2246,8 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
 
 uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	const uint8_t edid_sig[] = {
 			0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
 	uint16_t offset = 0;
@@ -6360,53 +2270,29 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
 		offset++;
 	}
 
-	NV_TRACE(dev, "Found EDID in BIOS\n");
+	NV_INFO(drm, "Found EDID in BIOS\n");
 
 	return bios->fp.edid = &bios->data[offset];
 }
 
-void
-nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
-			    struct dcb_entry *dcbent, int crtc)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct init_exec iexec = { true, false };
-
-	spin_lock_bh(&bios->lock);
-	bios->display.output = dcbent;
-	bios->display.crtc = crtc;
-	parse_init_table(bios, table, &iexec);
-	bios->display.output = NULL;
-	spin_unlock_bh(&bios->lock);
-}
-
-void
-nouveau_bios_init_exec(struct drm_device *dev, uint16_t table)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct init_exec iexec = { true, false };
-
-	parse_init_table(bios, table, &iexec);
-}
-
 static bool NVInitVBIOS(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 
 	memset(bios, 0, sizeof(struct nvbios));
 	spin_lock_init(&bios->lock);
 	bios->dev = dev;
 
-	return bios_shadow(dev);
+	bios->data = nouveau_bios(drm->device)->data;
+	bios->length = nouveau_bios(drm->device)->size;
+	return true;
 }
 
 static int nouveau_parse_vbios_struct(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
 	const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
 	int offset;
@@ -6414,7 +2300,7 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
 	offset = findstr(bios->data, bios->length,
 					bit_signature, sizeof(bit_signature));
 	if (offset) {
-		NV_TRACE(dev, "BIT BIOS found\n");
+		NV_INFO(drm, "BIT BIOS found\n");
 		bios->type = NVBIOS_BIT;
 		bios->offset = offset;
 		return parse_bit_structure(bios, offset + 6);
@@ -6423,21 +2309,21 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
 	offset = findstr(bios->data, bios->length,
 					bmp_signature, sizeof(bmp_signature));
 	if (offset) {
-		NV_TRACE(dev, "BMP BIOS found\n");
+		NV_INFO(drm, "BMP BIOS found\n");
 		bios->type = NVBIOS_BMP;
 		bios->offset = offset;
 		return parse_bmp_structure(dev, bios, offset);
 	}
 
-	NV_ERROR(dev, "No known BIOS signature found\n");
+	NV_ERROR(drm, "No known BIOS signature found\n");
 	return -ENODEV;
 }
 
 int
 nouveau_run_vbios_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	int i, ret = 0;
 
 	/* Reset the BIOS head to 0. */
@@ -6451,23 +2337,8 @@ nouveau_run_vbios_init(struct drm_device *dev)
 		bios->fp.lvds_init_run = false;
 	}
 
-	parse_init_tables(bios);
-
-	/*
-	 * Runs some additional script seen on G8x VBIOSen.  The VBIOS'
-	 * parser will run this right after the init tables, the binary
-	 * driver appears to run it at some point later.
-	 */
-	if (bios->some_script_ptr) {
-		struct init_exec iexec = {true, false};
-
-		NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
-			bios->some_script_ptr);
-		parse_init_table(bios, bios->some_script_ptr, &iexec);
-	}
-
-	if (dev_priv->card_type >= NV_50) {
-		for (i = 0; i < bios->dcb.entries; i++) {
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
 			nouveau_bios_run_display_table(dev, 0, 0,
 						       &bios->dcb.entry[i], -1);
 		}
@@ -6479,10 +2350,10 @@ nouveau_run_vbios_init(struct drm_device *dev)
 static bool
 nouveau_bios_posted(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	unsigned htotal;
 
-	if (dev_priv->card_type >= NV_50) {
+	if (nv_device(drm->device)->card_type >= NV_50) {
 		if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
 		    NVReadVgaCrtc(dev, 0, 0x1a) == 0)
 			return false;
@@ -6501,8 +2372,8 @@ nouveau_bios_posted(struct drm_device *dev)
 int
 nouveau_bios_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	int ret;
 
 	if (!NVInitVBIOS(dev))
@@ -6512,14 +2383,6 @@ nouveau_bios_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	ret = nouveau_i2c_init(dev);
-	if (ret)
-		return ret;
-
-	ret = nouveau_mxm_init(dev);
-	if (ret)
-		return ret;
-
 	ret = parse_dcb_table(dev, bios);
 	if (ret)
 		return ret;
@@ -6532,12 +2395,10 @@ nouveau_bios_init(struct drm_device *dev)
 
 	/* ... unless card isn't POSTed already */
 	if (!nouveau_bios_posted(dev)) {
-		NV_INFO(dev, "Adaptor not initialised, "
+		NV_INFO(drm, "Adaptor not initialised, "
 			"running VBIOS init tables.\n");
 		bios->execute = true;
 	}
-	if (nouveau_force_post)
-		bios->execute = true;
 
 	ret = nouveau_run_vbios_init(dev);
 	if (ret)
@@ -6560,10 +2421,4 @@ nouveau_bios_init(struct drm_device *dev)
 void
 nouveau_bios_takedown(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	nouveau_mxm_fini(dev);
-	nouveau_i2c_fini(dev);
-
-	kfree(dev_priv->vbios.data);
 }
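
The hunks above rename the DCB output types (OUTPUT_* becomes DCB_OUTPUT_*), move the parser onto struct dcb_output, and route logging through the nouveau_drm wrapper instead of drm_device. As a quick reference, here is a minimal userspace sketch of how parse_dcb20_entry() splits a DCB 2.0 connection dword into the fields visible in those hunks; the helper and the sample value (taken from the Dell Precision M6300 quirk comment above) are illustrative only, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Decode a DCB 2.0 "conn" dword the same way the parse_dcb20_entry()
 * hunks above do; other bitfields (connector, bus, location) are omitted
 * because their masks are not visible in this diff. */
static void decode_dcb20_conn(uint32_t conn)
{
	unsigned type      = conn & 0xf;	  /* 2 == DCB_OUTPUT_TMDS */
	unsigned i2c_index = (conn >> 4) & 0xf;
	unsigned heads     = (conn >> 8) & 0xf;
	unsigned output_or = (conn >> 24) & 0xf;  /* the "or" field */

	printf("type %u i2c %u heads 0x%x or 0x%x\n",
	       type, i2c_index, heads, output_or);
}

int main(void)
{
	decode_dcb20_conn(0x02025312);	/* prints: type 2 i2c 1 heads 0x3 or 0x2 */
	return 0;
}
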
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 298a3af48d14..3befbb821a56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -21,11 +21,10 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#ifndef __NOUVEAU_BIOS_H__
-#define __NOUVEAU_BIOS_H__
+#ifndef __NOUVEAU_DISPBIOS_H__
+#define __NOUVEAU_DISPBIOS_H__
 
 #include "nvreg.h"
-#include "nouveau_i2c.h"
 
 #define DCB_MAX_NUM_ENTRIES 16
 #define DCB_MAX_NUM_I2C_ENTRIES 16
@@ -39,8 +38,8 @@
 #define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
 #define ROM64(x) le64_to_cpu(*(u64 *)&(x))
 #define ROMPTR(d,x) ({            \
-	struct drm_nouveau_private *dev_priv = (d)->dev_private; \
-	ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \
+	struct nouveau_drm *drm = nouveau_drm((d)); \
+	ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
 })
 
 struct bit_entry {
@@ -53,95 +52,19 @@ struct bit_entry {
 
 int bit_table(struct drm_device *, u8 id, struct bit_entry *);
 
-enum dcb_gpio_tag {
-	DCB_GPIO_PANEL_POWER = 0x01,
-	DCB_GPIO_TVDAC0 = 0x0c,
-	DCB_GPIO_TVDAC1 = 0x2d,
-	DCB_GPIO_PWM_FAN = 0x09,
-	DCB_GPIO_FAN_SENSE = 0x3d,
-	DCB_GPIO_UNUSED = 0xff
-};
-
-enum dcb_connector_type {
-	DCB_CONNECTOR_VGA = 0x00,
-	DCB_CONNECTOR_TV_0 = 0x10,
-	DCB_CONNECTOR_TV_1 = 0x11,
-	DCB_CONNECTOR_TV_3 = 0x13,
-	DCB_CONNECTOR_DVI_I = 0x30,
-	DCB_CONNECTOR_DVI_D = 0x31,
-	DCB_CONNECTOR_DMS59_0 = 0x38,
-	DCB_CONNECTOR_DMS59_1 = 0x39,
-	DCB_CONNECTOR_LVDS = 0x40,
-	DCB_CONNECTOR_LVDS_SPWG = 0x41,
-	DCB_CONNECTOR_DP = 0x46,
-	DCB_CONNECTOR_eDP = 0x47,
-	DCB_CONNECTOR_HDMI_0 = 0x60,
-	DCB_CONNECTOR_HDMI_1 = 0x61,
-	DCB_CONNECTOR_DMS59_DP0 = 0x64,
-	DCB_CONNECTOR_DMS59_DP1 = 0x65,
-	DCB_CONNECTOR_NONE = 0xff
-};
-
-enum dcb_type {
-	OUTPUT_ANALOG = 0,
-	OUTPUT_TV = 1,
-	OUTPUT_TMDS = 2,
-	OUTPUT_LVDS = 3,
-	OUTPUT_DP = 6,
-	OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
-	OUTPUT_UNUSED = 15,
-	OUTPUT_ANY = -1
-};
-
-struct dcb_entry {
-	int index;	/* may not be raw dcb index if merging has happened */
-	enum dcb_type type;
-	uint8_t i2c_index;
-	uint8_t heads;
-	uint8_t connector;
-	uint8_t bus;
-	uint8_t location;
-	uint8_t or;
-	bool duallink_possible;
-	union {
-		struct sor_conf {
-			int link;
-		} sorconf;
-		struct {
-			int maxfreq;
-		} crtconf;
-		struct {
-			struct sor_conf sor;
-			bool use_straps_for_mode;
-			bool use_acpi_for_edid;
-			bool use_power_scripts;
-		} lvdsconf;
-		struct {
-			bool has_component_output;
-		} tvconf;
-		struct {
-			struct sor_conf sor;
-			int link_nr;
-			int link_bw;
-		} dpconf;
-		struct {
-			struct sor_conf sor;
-			int slave_addr;
-		} tmdsconf;
-	};
-	bool i2c_upper_default;
-};
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/conn.h>
 
 struct dcb_table {
 	uint8_t version;
 	int entries;
-	struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
+	struct dcb_output entry[DCB_MAX_NUM_ENTRIES];
 };
 
 enum nouveau_or {
-	OUTPUT_A = (1 << 0),
-	OUTPUT_B = (1 << 1),
-	OUTPUT_C = (1 << 2)
+	DCB_OUTPUT_A = (1 << 0),
+	DCB_OUTPUT_B = (1 << 1),
+	DCB_OUTPUT_C = (1 << 2)
 };
 
 enum LVDS_script {
@@ -154,58 +77,6 @@ enum LVDS_script {
 	LVDS_PANEL_OFF
 };
 
-/* these match types in pll limits table version 0x40,
- * nouveau uses them on all chipsets internally where a
- * specific pll needs to be referenced, but the exact
- * register isn't known.
- */
-enum pll_types {
-	PLL_CORE   = 0x01,
-	PLL_SHADER = 0x02,
-	PLL_UNK03  = 0x03,
-	PLL_MEMORY = 0x04,
-	PLL_VDEC   = 0x05,
-	PLL_UNK40  = 0x40,
-	PLL_UNK41  = 0x41,
-	PLL_UNK42  = 0x42,
-	PLL_VPLL0  = 0x80,
-	PLL_VPLL1  = 0x81,
-	PLL_MAX    = 0xff
-};
-
-struct pll_lims {
-	u32 reg;
-
-	struct {
-		int minfreq;
-		int maxfreq;
-		int min_inputfreq;
-		int max_inputfreq;
-
-		uint8_t min_m;
-		uint8_t max_m;
-		uint8_t min_n;
-		uint8_t max_n;
-	} vco1, vco2;
-
-	uint8_t max_log2p;
-	/*
-	 * for most pre nv50 cards setting a log2P of 7 (the common max_log2p
-	 * value) is no different to 6 (at least for vplls) so allowing the MNP
-	 * calc to use 7 causes the generated clock to be out by a factor of 2.
-	 * however, max_log2p cannot be fixed-up during parsing as the
-	 * unmodified max_log2p value is still needed for setting mplls, hence
-	 * an additional max_usable_log2p member
-	 */
-	uint8_t max_usable_log2p;
-	uint8_t log2p_bias;
-
-	uint8_t min_p;
-	uint8_t max_p;
-
-	int refclk;
-};
-
 struct nvbios {
 	struct drm_device *dev;
 	enum {
@@ -257,7 +128,7 @@ struct nvbios {
 	} state;
 
 	struct {
-		struct dcb_entry *output;
+		struct dcb_output *output;
 		int crtc;
 		uint16_t script_table_ptr;
 	} display;
@@ -302,11 +173,28 @@ struct nvbios {
 	} legacy;
 };
 
-void *dcb_table(struct drm_device *);
-void *dcb_outp(struct drm_device *, u8 idx);
-int dcb_outp_foreach(struct drm_device *, void *data,
+void *olddcb_table(struct drm_device *);
+void *olddcb_outp(struct drm_device *, u8 idx);
+int olddcb_outp_foreach(struct drm_device *, void *data,
 		     int (*)(struct drm_device *, void *, int idx, u8 *outp));
-u8 *dcb_conntab(struct drm_device *);
-u8 *dcb_conn(struct drm_device *, u8 idx);
+u8 *olddcb_conntab(struct drm_device *);
+u8 *olddcb_conn(struct drm_device *, u8 idx);
+
+int nouveau_bios_init(struct drm_device *);
+void nouveau_bios_takedown(struct drm_device *dev);
+int nouveau_run_vbios_init(struct drm_device *);
+struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *, int index);
+int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
+					  struct dcb_output *, int crtc);
+bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
+uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
+int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
+					 bool *dl, bool *if_is_24bit);
+int run_tmds_table(struct drm_device *, struct dcb_output *,
+			  int head, int pxclk);
+int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
+			    enum LVDS_script, int pxclk);
+bool bios_encoder_match(struct dcb_output *, u32 hash);
 
 #endif
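
The header keeps the little-endian ROM accessors (ROM16/ROM32/ROM48/ROM64) and reworks ROMPTR() to fetch the shadowed image through nouveau_drm. The sketch below is a rough userspace equivalent of what ROMPTR() resolves, assuming a flat copy of the VBIOS image; the bounds check is extra caution added here and is not part of the macro.

#include <stdint.h>
#include <stddef.h>

/* Stand-in for struct nvbios: just the shadowed image and its length. */
struct vbios_image {
	uint8_t *data;
	size_t   length;
};

/* ROM16(): VBIOS tables store 16-bit little-endian values and offsets. */
static inline uint16_t rom16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* ROMPTR(): a zero offset means "no table here"; otherwise the offset
 * indexes back into the shadowed image. */
static uint8_t *romptr(struct vbios_image *bios, const uint8_t *p)
{
	uint16_t offset = rom16(p);

	if (!offset || offset >= bios->length)
		return NULL;
	return &bios->data[offset];
}
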
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4ee2e7ff92d2..259e5f1adf47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,31 +27,127 @@
  *	    Jeremy Kolb  <jkolb@brandeis.edu>
  */
 
-#include <drm/drmP.h>
-#include <drm/ttm/ttm_page_alloc.h>
+#include <core/engine.h>
 
-#include <drm/nouveau_drm.h>
-#include "nouveau_drv.h"
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
 #include "nouveau_fence.h"
-#include "nouveau_ramht.h"
 
-#include <linux/log2.h>
-#include <linux/slab.h>
+#include "nouveau_bo.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
+
+/*
+ * NV10-NV40 tiling helpers
+ */
+
+static void
+nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
+			   u32 addr, u32 size, u32 pitch, u32 flags)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	int i = reg - drm->tile.reg;
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
+	struct nouveau_engine *engine;
+
+	nouveau_fence_unref(&reg->fence);
+
+	if (tile->pitch)
+		pfb->tile.fini(pfb, i, tile);
+
+	if (pitch)
+		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
+
+	pfb->tile.prog(pfb, i, tile);
+
+	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
+		engine->tile_prog(engine, i);
+	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
+		engine->tile_prog(engine, i);
+}
+
+static struct nouveau_drm_tile *
+nv10_bo_get_tile_region(struct drm_device *dev, int i)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_drm_tile *tile = &drm->tile.reg[i];
+
+	spin_lock(&drm->tile.lock);
+
+	if (!tile->used &&
+	    (!tile->fence || nouveau_fence_done(tile->fence)))
+		tile->used = true;
+	else
+		tile = NULL;
+
+	spin_unlock(&drm->tile.lock);
+	return tile;
+}
+
+static void
+nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
+			struct nouveau_fence *fence)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (tile) {
+		spin_lock(&drm->tile.lock);
+		if (fence) {
+			/* Mark it as pending. */
+			tile->fence = fence;
+			nouveau_fence_ref(fence);
+		}
+
+		tile->used = false;
+		spin_unlock(&drm->tile.lock);
+	}
+}
+
+static struct nouveau_drm_tile *
+nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
+		   u32 size, u32 pitch, u32 flags)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_drm_tile *tile, *found = NULL;
+	int i;
+
+	for (i = 0; i < pfb->tile.regions; i++) {
+		tile = nv10_bo_get_tile_region(dev, i);
+
+		if (pitch && !found) {
+			found = tile;
+			continue;
+
+		} else if (tile && pfb->tile.region[i].pitch) {
+			/* Kill an unused tile region. */
+			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
+		}
+
+		nv10_bo_put_tile_region(dev, tile, NULL);
+	}
+
+	if (found)
+		nv10_bo_update_tile_region(dev, found, addr, size,
+					    pitch, flags);
+	return found;
+}
 
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
-
-	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 	kfree(nvbo);
 }
 
@@ -59,23 +155,24 @@ static void
 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 		       int *align, int *size)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_device *device = nv_device(drm->device);
 
-	if (dev_priv->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		if (nvbo->tile_mode) {
-			if (dev_priv->chipset >= 0x40) {
+			if (device->chipset >= 0x40) {
 				*align = 65536;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (dev_priv->chipset >= 0x30) {
+			} else if (device->chipset >= 0x30) {
 				*align = 32768;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (dev_priv->chipset >= 0x20) {
+			} else if (device->chipset >= 0x20) {
 				*align = 16384;
 				*size = roundup(*size, 64 * nvbo->tile_mode);
 
-			} else if (dev_priv->chipset >= 0x10) {
+			} else if (device->chipset >= 0x10) {
 				*align = 16384;
 				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
@@ -94,7 +191,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	       struct sg_table *sg,
 	       struct nouveau_bo **pnvbo)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
 	size_t acc_size;
 	int ret;
@@ -111,22 +208,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	INIT_LIST_HEAD(&nvbo->vma_list);
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
-	nvbo->bo.bdev = &dev_priv->ttm.bdev;
+	nvbo->bo.bdev = &drm->ttm.bdev;
 
 	nvbo->page_shift = 12;
-	if (dev_priv->bar1_vm) {
+	if (drm->client.base.vm) {
 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
+			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
 	}
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
-	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
 				       sizeof(struct nouveau_bo));
 
-	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
 			  type, &nvbo->placement,
 			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
 			  nouveau_bo_del_ttm);
@@ -155,10 +252,11 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
 static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
-	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;
 
-	if (dev_priv->card_type == NV_10 &&
+	if (nv_device(drm->device)->card_type == NV_10 &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
@@ -198,13 +296,12 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 int
 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	int ret;
 
 	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
-		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
-			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
 			 1 << bo->mem.mem_type, memtype);
 		return -EINVAL;
 	}
@@ -222,10 +319,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
-			dev_priv->fb_aper_free -= bo->mem.size;
+			drm->gem.vram_available -= bo->mem.size;
 			break;
 		case TTM_PL_TT:
-			dev_priv->gart_info.aper_free -= bo->mem.size;
+			drm->gem.gart_available -= bo->mem.size;
 			break;
 		default:
 			break;
@@ -241,7 +338,7 @@ out:
 int
 nouveau_bo_unpin(struct nouveau_bo *nvbo)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	int ret;
 
@@ -258,10 +355,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
-			dev_priv->fb_aper_free += bo->mem.size;
+			drm->gem.vram_available += bo->mem.size;
 			break;
 		case TTM_PL_TT:
-			dev_priv->gart_info.aper_free += bo->mem.size;
+			drm->gem.gart_available += bo->mem.size;
 			break;
 		default:
 			break;
@@ -356,30 +453,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 }
 
 static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
-		      unsigned long size, uint32_t page_flags,
-		      struct page *dummy_read_page)
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+		      uint32_t page_flags, struct page *dummy_read)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct drm_device *dev = drm->dev;
 
-	switch (dev_priv->gart_info.type) {
-#if __OS_HAS_AGP
-	case NOUVEAU_GART_AGP:
-		return ttm_agp_tt_create(bdev, dev->agp->bridge,
-					 size, page_flags, dummy_read_page);
-#endif
-	case NOUVEAU_GART_PDMA:
-	case NOUVEAU_GART_HW:
-		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
-						dummy_read_page);
-	default:
-		NV_ERROR(dev, "Unknown GART type %d\n",
-			 dev_priv->gart_info.type);
-		break;
+	if (drm->agp.stat == ENABLED) {
+		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+					 page_flags, dummy_read);
 	}
 
-	return NULL;
+	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
 }
 
 static int
@@ -393,8 +478,7 @@ static int
 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			 struct ttm_mem_type_manager *man)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
 
 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -403,7 +487,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-		if (dev_priv->card_type >= NV_50) {
+		if (nv_device(drm->device)->card_type >= NV_50) {
 			man->func = &nouveau_vram_manager;
 			man->io_reserve_fastpath = false;
 			man->use_io_reserve_lru = true;
@@ -417,32 +501,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_WC;
 		break;
 	case TTM_PL_TT:
-		if (dev_priv->card_type >= NV_50)
+		if (nv_device(drm->device)->card_type >= NV_50)
 			man->func = &nouveau_gart_manager;
 		else
+		if (drm->agp.stat != ENABLED)
+			man->func = &nv04_gart_manager;
+		else
 			man->func = &ttm_bo_manager_func;
-		switch (dev_priv->gart_info.type) {
-		case NOUVEAU_GART_AGP:
+
+		if (drm->agp.stat == ENABLED) {
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_WC;
 			man->default_caching = TTM_PL_FLAG_WC;
-			break;
-		case NOUVEAU_GART_PDMA:
-		case NOUVEAU_GART_HW:
+		} else {
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
-			break;
-		default:
-			NV_ERROR(dev, "Unknown GART type: %d\n",
-				 dev_priv->gart_info.type);
-			return -EINVAL;
 		}
+
 		break;
 	default:
-		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
 		return -EINVAL;
 	}
 	return 0;
@@ -491,6 +571,18 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 }
 
 static int
+nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING  (chan, handle);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -676,20 +768,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
 {
-	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-					 &chan->m2mf_ntfy);
+	int ret = RING_SPACE(chan, 6);
 	if (ret == 0) {
-		ret = RING_SPACE(chan, 6);
-		if (ret == 0) {
-			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-			OUT_RING  (chan, handle);
-			BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
-			OUT_RING  (chan, NvNotify0);
-			OUT_RING  (chan, NvDmaFB);
-			OUT_RING  (chan, NvDmaFB);
-		} else {
-			nouveau_ramht_remove(chan, NvNotify0);
-		}
+		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING  (chan, handle);
+		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+		OUT_RING  (chan, NvNotify0);
+		OUT_RING  (chan, NvDmaFB);
+		OUT_RING  (chan, NvDmaFB);
 	}
 
 	return ret;
@@ -788,16 +874,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
 {
-	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-					 &chan->m2mf_ntfy);
+	int ret = RING_SPACE(chan, 4);
 	if (ret == 0) {
-		ret = RING_SPACE(chan, 4);
-		if (ret == 0) {
-			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-			OUT_RING  (chan, handle);
-			BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
-			OUT_RING  (chan, NvNotify0);
-		}
+		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING  (chan, handle);
+		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+		OUT_RING  (chan, NvNotify0);
 	}
 
 	return ret;
@@ -808,8 +890,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
 	if (mem->mem_type == TTM_PL_TT)
-		return chan->gart_handle;
-	return chan->vram_handle;
+		return NvDmaTT;
+	return NvDmaFB;
 }
 
 static int
@@ -865,8 +947,9 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
 	struct nouveau_mem *node = mem->mm_node;
 	int ret;
 
-	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
-			     node->page_shift, NV_MEM_ACCESS_RO, vma);
+	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
+			     PAGE_SHIFT, node->page_shift,
+			     NV_MEM_ACCESS_RW, vma);
 	if (ret)
 		return ret;
 
@@ -883,19 +966,19 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		     bool no_wait_reserve, bool no_wait_gpu,
 		     struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct nouveau_channel *chan = chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_channel *chan = drm->channel;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
 
-	mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+	mutex_lock(&chan->cli->mutex);
 
 	/* create temporary vmas for the transfer and attach them to the
 	 * old nouveau_mem node, these will get cleaned up after ttm has
 	 * destroyed the ttm_mem_reg
 	 */
-	if (dev_priv->card_type >= NV_50) {
+	if (nv_device(drm->device)->card_type >= NV_50) {
 		struct nouveau_mem *node = old_mem->mm_node;
 
 		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
@@ -907,7 +990,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 			goto out;
 	}
 
-	ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
+	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
 	if (ret == 0) {
 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
 						    no_wait_reserve,
@@ -915,14 +998,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	}
 
 out:
-	mutex_unlock(&chan->mutex);
+	mutex_unlock(&chan->cli->mutex);
 	return ret;
 }
 
 void
-nouveau_bo_move_init(struct nouveau_channel *chan)
+nouveau_bo_move_init(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	static const struct {
 		const char *name;
 		int engine;
@@ -932,7 +1014,8 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
-		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
 		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
@@ -947,19 +1030,34 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
 	int ret;
 
 	do {
+		struct nouveau_object *object;
+		struct nouveau_channel *chan;
 		u32 handle = (mthd->engine << 16) | mthd->oclass;
-		ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+
+		if (mthd->init == nve0_bo_move_init)
+			chan = drm->cechan;
+		else
+			chan = drm->channel;
+		if (chan == NULL)
+			continue;
+
+		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
+					 mthd->oclass, NULL, 0, &object);
 		if (ret == 0) {
 			ret = mthd->init(chan, handle);
-			if (ret == 0) {
-				dev_priv->ttm.move = mthd->exec;
-				name = mthd->name;
-				break;
+			if (ret) {
+				nouveau_object_del(nv_object(drm),
+						   chan->handle, handle);
+				continue;
 			}
+
+			drm->ttm.move = mthd->exec;
+			name = mthd->name;
+			break;
 		}
 	} while ((++mthd)->exec);
 
-	NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
 }
 
 static int
@@ -1044,7 +1142,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 			nouveau_vm_map(vma, new_mem->mm_node);
 		} else
 		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
-		    nvbo->page_shift == vma->vm->spg_shift) {
+		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
 			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
 				nouveau_vm_map_sg_table(vma, 0, new_mem->
 						  num_pages << PAGE_SHIFT,
@@ -1061,10 +1159,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 
 static int
 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
-		   struct nouveau_tile_reg **new_tile)
+		   struct nouveau_drm_tile **new_tile)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 offset = new_mem->start << PAGE_SHIFT;
 
@@ -1072,8 +1170,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	if (new_mem->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	if (dev_priv->card_type >= NV_10) {
-		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+	if (nv_device(drm->device)->card_type >= NV_10) {
+		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
 						nvbo->tile_mode,
 						nvbo->tile_flags);
 	}
@@ -1083,13 +1181,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 
 static void
 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
-		      struct nouveau_tile_reg *new_tile,
-		      struct nouveau_tile_reg **old_tile)
+		      struct nouveau_drm_tile *new_tile,
+		      struct nouveau_drm_tile **old_tile)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;
 
-	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
 	*old_tile = new_tile;
 }
 
@@ -1098,13 +1196,13 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		bool no_wait_reserve, bool no_wait_gpu,
 		struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct nouveau_tile_reg *new_tile = NULL;
+	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
-	if (dev_priv->card_type < NV_50) {
+	if (nv_device(drm->device)->card_type < NV_50) {
 		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
 		if (ret)
 			return ret;
@@ -1119,7 +1217,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	}
 
 	/* CPU copy if we have no accelerated method available */
-	if (!dev_priv->ttm.move) {
+	if (!drm->ttm.move) {
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
@@ -1139,7 +1237,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
-	if (dev_priv->card_type < NV_50) {
+	if (nv_device(drm->device)->card_type < NV_50) {
 		if (ret)
 			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
 		else
@@ -1159,8 +1257,8 @@ static int
 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct drm_device *dev = drm->dev;
 	int ret;
 
 	mem->bus.addr = NULL;
@@ -1176,48 +1274,28 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		return 0;
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
-		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+		if (drm->agp.stat == ENABLED) {
 			mem->bus.offset = mem->start << PAGE_SHIFT;
-			mem->bus.base = dev_priv->gart_info.aper_base;
+			mem->bus.base = drm->agp.base;
 			mem->bus.is_iomem = true;
 		}
 #endif
 		break;
 	case TTM_PL_VRAM:
-	{
-		struct nouveau_mem *node = mem->mm_node;
-		u8 page_shift;
-
-		if (!dev_priv->bar1_vm) {
-			mem->bus.offset = mem->start << PAGE_SHIFT;
-			mem->bus.base = pci_resource_start(dev->pdev, 1);
-			mem->bus.is_iomem = true;
-			break;
-		}
-
-		if (dev_priv->card_type >= NV_C0)
-			page_shift = node->page_shift;
-		else
-			page_shift = 12;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(dev->pdev, 1);
+		mem->bus.is_iomem = true;
+		if (nv_device(drm->device)->card_type >= NV_50) {
+			struct nouveau_bar *bar = nouveau_bar(drm->device);
+			struct nouveau_mem *node = mem->mm_node;
 
-		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
-				     page_shift, NV_MEM_ACCESS_RW,
-				     &node->bar_vma);
-		if (ret)
-			return ret;
+			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
+					&node->bar_vma);
+			if (ret)
+				return ret;
 
-		nouveau_vm_map(&node->bar_vma, node);
-		if (ret) {
-			nouveau_vm_put(&node->bar_vma);
-			return ret;
+			mem->bus.offset = node->bar_vma.offset;
 		}
-
-		mem->bus.offset = node->bar_vma.offset;
-		if (dev_priv->card_type == NV_50) /*XXX*/
-			mem->bus.offset -= 0x0020000000ULL;
-		mem->bus.base = pci_resource_start(dev->pdev, 1);
-		mem->bus.is_iomem = true;
-	}
 		break;
 	default:
 		return -EINVAL;
@@ -1228,41 +1306,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_bar *bar = nouveau_bar(drm->device);
 	struct nouveau_mem *node = mem->mm_node;
 
-	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
-		return;
-
 	if (!node->bar_vma.node)
 		return;
 
-	nouveau_vm_unmap(&node->bar_vma);
-	nouveau_vm_put(&node->bar_vma);
+	bar->unmap(bar, &node->bar_vma);
 }
 
 static int
 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_device *device = nv_device(drm->device);
+	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
-		if (dev_priv->card_type < NV_50 ||
+		if (nv_device(drm->device)->card_type < NV_50 ||
 		    !nouveau_bo_tile_layout(nvbo))
 			return 0;
 	}
 
 	/* make sure bo is in mappable vram */
-	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+	if (bo->mem.start + bo->mem.num_pages < mappable)
 		return 0;
 
 
 	nvbo->placement.fpfn = 0;
-	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+	nvbo->placement.lpfn = mappable;
 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
 	return nouveau_bo_validate(nvbo, false, true, false);
 }
@@ -1271,7 +1348,7 @@ static int
 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
-	struct drm_nouveau_private *dev_priv;
+	struct nouveau_drm *drm;
 	struct drm_device *dev;
 	unsigned i;
 	int r;
@@ -1288,11 +1365,11 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 		return 0;
 	}
 
-	dev_priv = nouveau_bdev(ttm->bdev);
-	dev = dev_priv->dev;
+	drm = nouveau_bdev(ttm->bdev);
+	dev = drm->dev;
 
 #if __OS_HAS_AGP
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+	if (drm->agp.stat == ENABLED) {
 		return ttm_agp_tt_populate(ttm);
 	}
 #endif
@@ -1329,7 +1406,7 @@ static void
 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
-	struct drm_nouveau_private *dev_priv;
+	struct nouveau_drm *drm;
 	struct drm_device *dev;
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1337,11 +1414,11 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;
 
-	dev_priv = nouveau_bdev(ttm->bdev);
-	dev = dev_priv->dev;
+	drm = nouveau_bdev(ttm->bdev);
+	dev = drm->dev;
 
 #if __OS_HAS_AGP
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+	if (drm->agp.stat == ENABLED) {
 		ttm_agp_tt_unpopulate(ttm);
 		return;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
new file mode 100644
index 000000000000..dec51b1098fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -0,0 +1,99 @@
+#ifndef __NOUVEAU_BO_H__
+#define __NOUVEAU_BO_H__
+
+struct nouveau_channel;
+struct nouveau_fence;
+struct nouveau_vma;
+
+struct nouveau_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	u32 valid_domains;
+	u32 placements[3];
+	u32 busy_placements[3];
+	struct ttm_bo_kmap_obj kmap;
+	struct list_head head;
+
+	/* protected by ttm_bo_reserve() */
+	struct drm_file *reserved_by;
+	struct list_head entry;
+	int pbbo_index;
+	bool validate_mapped;
+
+	struct list_head vma_list;
+	unsigned page_shift;
+
+	u32 tile_mode;
+	u32 tile_flags;
+	struct nouveau_drm_tile *tile;
+
+	struct drm_gem_object *gem;
+	int pin_refcnt;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+	int vmapping_count;
+};
+
+static inline struct nouveau_bo *
+nouveau_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct nouveau_bo, bo);
+}
+
+static inline int
+nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *prev;
+
+	if (!pnvbo)
+		return -EINVAL;
+	prev = *pnvbo;
+
+	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
+	if (prev) {
+		struct ttm_buffer_object *bo = &prev->bo;
+
+		ttm_bo_unref(&bo);
+	}
+
+	return 0;
+}
+
+extern struct ttm_bo_driver nouveau_bo_driver;
+
+void nouveau_bo_move_init(struct nouveau_drm *);
+int  nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
+		    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
+		    struct nouveau_bo **);
+int  nouveau_bo_pin(struct nouveau_bo *, u32 flags);
+int  nouveau_bo_unpin(struct nouveau_bo *);
+int  nouveau_bo_map(struct nouveau_bo *);
+void nouveau_bo_unmap(struct nouveau_bo *);
+void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
+u16  nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
+void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
+u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
+void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
+void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
+			 bool no_wait_reserve, bool no_wait_gpu);
+
+struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
+
+int  nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
+			struct nouveau_vma *);
+void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
+
+/* TODO: submit equivalent to TTM generic API upstream? */
+static inline void __iomem *
+nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
+{
+	bool is_iomem;
+	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
+						&nvbo->kmap, &is_iomem);
+	WARN_ON_ONCE(ioptr && !is_iomem);
+	return ioptr;
+}
+
+#endif
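
The new header collects the buffer-object API that the nouveau_bo.c changes above now build on. Below is a minimal usage sketch written against the declarations shown here; error paths are abbreviated, the 64 KiB size, VRAM placement and function name are illustrative choices, and "dev" is assumed to be a valid struct drm_device.

/* Sketch only: allocate a small VRAM buffer, write one dword through the
 * kernel mapping, then drop it again, using just the declarations from
 * nouveau_bo.h above. */
static int example_scratch_bo(struct drm_device *dev)
{
	struct nouveau_bo *nvbo = NULL;
	int ret;

	ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
			     0, 0, NULL, &nvbo);	/* no tiling, no sg table */
	if (ret)
		return ret;

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret)
		goto out;

	ret = nouveau_bo_map(nvbo);
	if (ret == 0) {
		nouveau_bo_wr32(nvbo, 0, 0xcafebabe);	/* dword index 0 */
		nouveau_bo_unmap(nvbo);
	}

	nouveau_bo_unpin(nvbo);
out:
	nouveau_bo_ref(NULL, &nvbo);	/* releases the TTM reference */
	return ret;
}
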
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index 2c5eb5d8d556..6da576445b3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -22,7 +22,9 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_hw.h"
 
 /****************************************************************************\
@@ -195,12 +197,13 @@ static void
 nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 		int *burst, int *lwm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nv_fifo_info fifo_data;
 	struct nv_sim_state sim_data;
 	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
 	int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
-	uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1);
+	uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1);
 
 	sim_data.pclk_khz = VClk;
 	sim_data.mclk_khz = MClk;
@@ -218,13 +221,13 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 		sim_data.mem_latency = 3;
 		sim_data.mem_page_miss = 10;
 	} else {
-		sim_data.memory_type = nvReadFB(dev, NV04_PFB_CFG0) & 0x1;
-		sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
+		sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1;
+		sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
 		sim_data.mem_latency = cfg1 & 0xf;
 		sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
 	}
 
-	if (dev_priv->card_type == NV_04)
+	if (nv_device(drm->device)->card_type == NV_04)
 		nv04_calc_arb(&fifo_data, &sim_data);
 	else
 		nv10_calc_arb(&fifo_data, &sim_data);
@@ -249,9 +252,9 @@ nv20_update_arb(int *burst, int *lwm)
 void
 nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (dev_priv->card_type < NV_20)
+	if (nv_device(drm->device)->card_type < NV_20)
 		nv04_update_arb(dev, vclk, bpp, burst, lwm);
 	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
 		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
@@ -260,219 +263,3 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
 	} else
 		nv20_update_arb(burst, lwm);
 }
-
-static int
-getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
-	      struct nouveau_pll_vals *bestpv)
-{
-	/* Find M, N and P for a single stage PLL
-	 *
-	 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
-	 * values, but we're too lazy to use those atm
-	 *
-	 * "clk" parameter in kHz
-	 * returns calculated clock
-	 */
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int cv = dev_priv->vbios.chip_version;
-	int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
-	int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
-	int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
-	int minU = pll_lim->vco1.min_inputfreq;
-	int maxU = pll_lim->vco1.max_inputfreq;
-	int minP = pll_lim->max_p ? pll_lim->min_p : 0;
-	int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
-	int crystal = pll_lim->refclk;
-	int M, N, thisP, P;
-	int clkP, calcclk;
-	int delta, bestdelta = INT_MAX;
-	int bestclk = 0;
-
-	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
-	/* possibly correlated with introduction of 27MHz crystal */
-	if (dev_priv->card_type < NV_50) {
-		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
-			if (clk > 250000)
-				maxM = 6;
-			if (clk > 340000)
-				maxM = 2;
-		} else if (cv < 0x40) {
-			if (clk > 150000)
-				maxM = 6;
-			if (clk > 200000)
-				maxM = 4;
-			if (clk > 340000)
-				maxM = 2;
-		}
-	}
-
-	P = pll_lim->max_p ? maxP : (1 << maxP);
-	if ((clk * P) < minvco) {
-		minvco = clk * maxP;
-		maxvco = minvco * 2;
-	}
-
-	if (clk + clk/200 > maxvco)	/* +0.5% */
-		maxvco = clk + clk/200;
-
-	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
-	for (thisP = minP; thisP <= maxP; thisP++) {
-		P = pll_lim->max_p ? thisP : (1 << thisP);
-		clkP = clk * P;
-
-		if (clkP < minvco)
-			continue;
-		if (clkP > maxvco)
-			return bestclk;
-
-		for (M = minM; M <= maxM; M++) {
-			if (crystal/M < minU)
-				return bestclk;
-			if (crystal/M > maxU)
-				continue;
-
-			/* add crystal/2 to round better */
-			N = (clkP * M + crystal/2) / crystal;
-
-			if (N < minN)
-				continue;
-			if (N > maxN)
-				break;
-
-			/* more rounding additions */
-			calcclk = ((N * crystal + P/2) / P + M/2) / M;
-			delta = abs(calcclk - clk);
-			/* we do an exhaustive search rather than terminating
-			 * on an optimality condition...
-			 */
-			if (delta < bestdelta) {
-				bestdelta = delta;
-				bestclk = calcclk;
-				bestpv->N1 = N;
-				bestpv->M1 = M;
-				bestpv->log2P = thisP;
-				if (delta == 0)	/* except this one */
-					return bestclk;
-			}
-		}
-	}
-
-	return bestclk;
-}
-
-static int
-getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
-	      struct nouveau_pll_vals *bestpv)
-{
-	/* Find M, N and P for a two stage PLL
-	 *
-	 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
-	 * values, but we're too lazy to use those atm
-	 *
-	 * "clk" parameter in kHz
-	 * returns calculated clock
-	 */
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chip_version = dev_priv->vbios.chip_version;
-	int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
-	int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
-	int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
-	int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
-	int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
-	int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
-	int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
-	int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
-	int maxlog2P = pll_lim->max_usable_log2p;
-	int crystal = pll_lim->refclk;
-	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
-	int M1, N1, M2, N2, log2P;
-	int clkP, calcclk1, calcclk2, calcclkout;
-	int delta, bestdelta = INT_MAX;
-	int bestclk = 0;
-
-	int vco2 = (maxvco2 - maxvco2/200) / 2;
-	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
-		;
-	clkP = clk << log2P;
-
-	if (maxvco2 < clk + clk/200)	/* +0.5% */
-		maxvco2 = clk + clk/200;
-
-	for (M1 = minM1; M1 <= maxM1; M1++) {
-		if (crystal/M1 < minU1)
-			return bestclk;
-		if (crystal/M1 > maxU1)
-			continue;
-
-		for (N1 = minN1; N1 <= maxN1; N1++) {
-			calcclk1 = crystal * N1 / M1;
-			if (calcclk1 < minvco1)
-				continue;
-			if (calcclk1 > maxvco1)
-				break;
-
-			for (M2 = minM2; M2 <= maxM2; M2++) {
-				if (calcclk1/M2 < minU2)
-					break;
-				if (calcclk1/M2 > maxU2)
-					continue;
-
-				/* add calcclk1/2 to round better */
-				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
-				if (N2 < minN2)
-					continue;
-				if (N2 > maxN2)
-					break;
-
-				if (!fixedgain2) {
-					if (chip_version < 0x60)
-						if (N2/M2 < 4 || N2/M2 > 10)
-							continue;
-
-					calcclk2 = calcclk1 * N2 / M2;
-					if (calcclk2 < minvco2)
-						break;
-					if (calcclk2 > maxvco2)
-						continue;
-				} else
-					calcclk2 = calcclk1;
-
-				calcclkout = calcclk2 >> log2P;
-				delta = abs(calcclkout - clk);
-				/* we do an exhaustive search rather than terminating
-				 * on an optimality condition...
-				 */
-				if (delta < bestdelta) {
-					bestdelta = delta;
-					bestclk = calcclkout;
-					bestpv->N1 = N1;
-					bestpv->M1 = M1;
-					bestpv->N2 = N2;
-					bestpv->M2 = M2;
-					bestpv->log2P = log2P;
-					if (delta == 0)	/* except this one */
-						return bestclk;
-				}
-			}
-		}
-	}
-
-	return bestclk;
-}
-
-int
-nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
-		     struct nouveau_pll_vals *pv)
-{
-	int outclk;
-
-	if (!pll_lim->vco2.maxfreq)
-		outclk = getMNP_single(dev, pll_lim, clk, pv);
-	else
-		outclk = getMNP_double(dev, pll_lim, clk, pv);
-
-	if (!outclk)
-		NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
-
-	return outclk;
-}
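The search deleted above picks N for each candidate (M, P) by rounded integer division, then keeps whichever result lands closest to the requested clock over an exhaustive sweep. A minimal user-space sketch of that rounding idiom follows; the function name and the 27000 kHz crystal / 135000 kHz target are invented for illustration, and the driver's VCO and input-frequency limits are not modelled.

#include <stdio.h>

/* fout = refclk * N / (M * P); N is chosen by rounded division, as in the
 * removed getMNP_single(): adding refclk/2 before dividing rounds to the
 * nearest integer instead of truncating.
 */
static int pick_n(int clk_khz, int refclk_khz, int m, int p)
{
	int clk_p = clk_khz * p;
	return (clk_p * m + refclk_khz / 2) / refclk_khz;
}

int main(void)
{
	int refclk = 27000, clk = 135000, m = 2, p = 1;
	int n = pick_n(clk, refclk, m, p);
	int fout = ((n * refclk + p / 2) / p + m / 2) / m;

	printf("M=%d N=%d P=%d -> %d kHz (target %d kHz)\n", m, n, p, fout, clk);
	return 0;
}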
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
new file mode 100644
index 000000000000..c1d7301c0e9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/instmem.h>
+
+#include <engine/software.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_bo.h"
+#include "nouveau_chan.h"
+#include "nouveau_fence.h"
+#include "nouveau_abi16.h"
+
+MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
+static int nouveau_vram_pushbuf;
+module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+
+int
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+	struct nouveau_cli *cli = chan->cli;
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	ret = nouveau_fence_new(chan, &fence);
+	if (!ret) {
+		ret = nouveau_fence_wait(fence, false, false);
+		nouveau_fence_unref(&fence);
+	}
+
+	if (ret)
+		NV_ERROR(cli, "failed to idle channel 0x%08x\n", chan->handle);
+	return ret;
+}
+
+void
+nouveau_channel_del(struct nouveau_channel **pchan)
+{
+	struct nouveau_channel *chan = *pchan;
+	if (chan) {
+		struct nouveau_object *client = nv_object(chan->cli);
+		if (chan->fence) {
+			nouveau_channel_idle(chan);
+			nouveau_fence(chan->drm)->context_del(chan);
+		}
+		nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
+		nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
+		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
+		nouveau_bo_unmap(chan->push.buffer);
+		nouveau_bo_ref(NULL, &chan->push.buffer);
+		kfree(chan);
+	}
+	*pchan = NULL;
+}
+
+static int
+nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
+		     u32 parent, u32 handle, u32 size,
+		     struct nouveau_channel **pchan)
+{
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_instmem *imem = nouveau_instmem(device);
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_client *client = &cli->base;
+	struct nv_dma_class args = {};
+	struct nouveau_channel *chan;
+	struct nouveau_object *push;
+	u32 target;
+	int ret;
+
+	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+
+	chan->cli = cli;
+	chan->drm = drm;
+	chan->handle = handle;
+
+	/* allocate memory for dma push buffer */
+	target = TTM_PL_FLAG_TT;
+	if (nouveau_vram_pushbuf)
+		target = TTM_PL_FLAG_VRAM;
+
+	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
+			    &chan->push.buffer);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(chan->push.buffer, target);
+		if (ret == 0)
+			ret = nouveau_bo_map(chan->push.buffer);
+	}
+
+	if (ret) {
+		nouveau_channel_del(pchan);
+		return ret;
+	}
+
+	/* create dma object covering the *entire* memory space that the
+	 * pushbuf lives in; this is because the GEM code requires that
+	 * we be able to call out to other (indirect) push buffers
+	 */
+	chan->push.vma.offset = chan->push.buffer->bo.offset;
+	chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
+
+	if (device->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
+					&chan->push.vma);
+		if (ret) {
+			nouveau_channel_del(pchan);
+			return ret;
+		}
+
+		args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+		args.start = 0;
+		args.limit = client->vm->vmm->limit - 1;
+	} else
+	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+		u64 limit = pfb->ram.size - imem->reserved - 1;
+		if (device->card_type == NV_04) {
+			/* nv04 vram pushbuf hack: retarget to its location in
+			 * the framebuffer BAR rather than direct vram access.
+			 * It's unclear why this is needed; it came from the -nv ddx.
+			 */
+			args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
+			args.start = pci_resource_start(device->pdev, 1);
+			args.limit = args.start + limit;
+		} else {
+			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+			args.start = 0;
+			args.limit = limit;
+		}
+	} else {
+		if (chan->drm->agp.stat == ENABLED) {
+			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+			args.start = chan->drm->agp.base;
+			args.limit = chan->drm->agp.base +
+				     chan->drm->agp.size - 1;
+		} else {
+			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+			args.start = 0;
+			args.limit = vmm->limit - 1;
+		}
+	}
+
+	ret = nouveau_object_new(nv_object(chan->cli), parent,
+				 chan->push.handle, 0x0002,
+				 &args, sizeof(args), &push);
+	if (ret) {
+		nouveau_channel_del(pchan);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
+		    u32 parent, u32 handle, u32 engine,
+		    struct nouveau_channel **pchan)
+{
+	static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS,
+					NVC0_CHANNEL_IND_CLASS,
+					NV84_CHANNEL_IND_CLASS,
+					NV50_CHANNEL_IND_CLASS,
+					0 };
+	const u16 *oclass = oclasses;
+	struct nve0_channel_ind_class args;
+	struct nouveau_channel *chan;
+	int ret;
+
+	/* allocate dma push buffer */
+	ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
+	*pchan = chan;
+	if (ret)
+		return ret;
+
+	/* create channel object */
+	args.pushbuf = chan->push.handle;
+	args.ioffset = 0x10000 + chan->push.vma.offset;
+	args.ilength = 0x02000;
+	args.engine  = engine;
+
+	do {
+		ret = nouveau_object_new(nv_object(cli), parent, handle,
+					 *oclass++, &args, sizeof(args),
+					 &chan->object);
+		if (ret == 0)
+			return ret;
+	} while (*oclass);
+
+	nouveau_channel_del(pchan);
+	return ret;
+}
+
+static int
+nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
+		    u32 parent, u32 handle, struct nouveau_channel **pchan)
+{
+	static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS,
+					NV17_CHANNEL_DMA_CLASS,
+					NV10_CHANNEL_DMA_CLASS,
+					NV03_CHANNEL_DMA_CLASS,
+					0 };
+	const u16 *oclass = oclasses;
+	struct nv03_channel_dma_class args;
+	struct nouveau_channel *chan;
+	int ret;
+
+	/* allocate dma push buffer */
+	ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
+	*pchan = chan;
+	if (ret)
+		return ret;
+
+	/* create channel object */
+	args.pushbuf = chan->push.handle;
+	args.offset = chan->push.vma.offset;
+
+	do {
+		ret = nouveau_object_new(nv_object(cli), parent, handle,
+					 *oclass++, &args, sizeof(args),
+					 &chan->object);
+		if (ret == 0)
+			return ret;
+	} while (ret && *oclass);
+
+	nouveau_channel_del(pchan);
+	return ret;
+}
+
+static int
+nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
+{
+	struct nouveau_client *client = nv_client(chan->cli);
+	struct nouveau_device *device = nv_device(chan->drm->device);
+	struct nouveau_instmem *imem = nouveau_instmem(device);
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_software_chan *swch;
+	struct nouveau_object *object;
+	struct nv_dma_class args;
+	int ret, i;
+
+	/* allocate dma objects to cover all allowed vram, and gart */
+	if (device->card_type < NV_C0) {
+		if (device->card_type >= NV_50) {
+			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+			args.start = 0;
+			args.limit = client->vm->vmm->limit - 1;
+		} else {
+			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+			args.start = 0;
+			args.limit = pfb->ram.size - imem->reserved - 1;
+		}
+
+		ret = nouveau_object_new(nv_object(client), chan->handle, vram,
+					 0x003d, &args, sizeof(args), &object);
+		if (ret)
+			return ret;
+
+		if (device->card_type >= NV_50) {
+			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+			args.start = 0;
+			args.limit = client->vm->vmm->limit - 1;
+		} else
+		if (chan->drm->agp.stat == ENABLED) {
+			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+			args.start = chan->drm->agp.base;
+			args.limit = chan->drm->agp.base +
+				     chan->drm->agp.size - 1;
+		} else {
+			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+			args.start = 0;
+			args.limit = vmm->limit - 1;
+		}
+
+		ret = nouveau_object_new(nv_object(client), chan->handle, gart,
+					 0x003d, &args, sizeof(args), &object);
+		if (ret)
+			return ret;
+
+		chan->vram = vram;
+		chan->gart = gart;
+	}
+
+	/* initialise dma tracking parameters */
+	switch (nv_hclass(chan->object) & 0x00ff) {
+	case 0x006b:
+	case 0x006e:
+		chan->user_put = 0x40;
+		chan->user_get = 0x44;
+		chan->dma.max = (0x10000 / 4) - 2;
+		break;
+	default:
+		chan->user_put = 0x40;
+		chan->user_get = 0x44;
+		chan->user_get_hi = 0x60;
+		chan->dma.ib_base =  0x10000 / 4;
+		chan->dma.ib_max  = (0x02000 / 8) - 1;
+		chan->dma.ib_put  = 0;
+		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+		chan->dma.max = chan->dma.ib_base;
+		break;
+	}
+
+	chan->dma.put = 0;
+	chan->dma.cur = chan->dma.put;
+	chan->dma.free = chan->dma.max - chan->dma.cur;
+
+	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		OUT_RING(chan, 0x00000000);
+
+	/* allocate software object class (used for fences on <= nv05, and
+	 * to signal flip completion), and bind it to a subchannel.
+	 */
+	if (chan != chan->drm->cechan) {
+		ret = nouveau_object_new(nv_object(client), chan->handle,
+					 NvSw, nouveau_abi16_swclass(chan->drm),
+					 NULL, 0, &object);
+		if (ret)
+			return ret;
+
+		swch = (void *)object->parent;
+		swch->flip = nouveau_flip_complete;
+		swch->flip_data = chan;
+	}
+
+	if (device->card_type < NV_C0) {
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
+		OUT_RING  (chan, NvSw);
+		FIRE_RING (chan);
+	}
+
+	/* initialise synchronisation */
+	return nouveau_fence(chan->drm)->context_new(chan);
+}
+
+int
+nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
+		    u32 parent, u32 handle, u32 arg0, u32 arg1,
+		    struct nouveau_channel **pchan)
+{
+	int ret;
+
+	ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
+	if (ret) {
+		NV_DEBUG(cli, "ib channel create, %d\n", ret);
+		ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
+		if (ret) {
+			NV_DEBUG(cli, "dma channel create, %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = nouveau_channel_init(*pchan, arg0, arg1);
+	if (ret) {
+		NV_ERROR(cli, "channel failed to initialise, %d\n", ret);
+		nouveau_channel_del(pchan);
+		return ret;
+	}
+
+	return 0;
+}
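For reference while reading the rest of the series: a short sketch of how a caller inside the driver might drive the three entry points defined above. It is illustrative only; the 0xbeef* handles are placeholders rather than the handles the driver actually passes, and error handling is trimmed.

/* Illustrative only.  Note that arg0 doubles as the engine selector for
 * indirect channels and as the vram dma handle passed to channel init,
 * while arg1 names the gart dma object.
 */
static int example_channel_roundtrip(struct nouveau_drm *drm,
				     struct nouveau_cli *cli, u32 parent)
{
	struct nouveau_channel *chan;
	int ret;

	ret = nouveau_channel_new(drm, cli, parent, 0xbeef0001,
				  0xbeef0002, 0xbeef0003, &chan);
	if (ret)
		return ret;

	/* ... emit commands on chan ... */

	ret = nouveau_channel_idle(chan);	/* fence and wait for drain */
	nouveau_channel_del(&chan);		/* unwinds pushbuf and objects */
	return ret;
}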
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
new file mode 100644
index 000000000000..40f97e2c47b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -0,0 +1,47 @@
+#ifndef __NOUVEAU_CHAN_H__
+#define __NOUVEAU_CHAN_H__
+
+struct nouveau_cli;
+
+struct nouveau_channel {
+	struct nouveau_cli *cli;
+	struct nouveau_drm *drm;
+
+	u32 handle;
+	u32 vram;
+	u32 gart;
+
+	struct {
+		struct nouveau_bo *buffer;
+		struct nouveau_vma vma;
+		u32 handle;
+	} push;
+
+	/* TODO: this will be reworked in the near future */
+	bool accel_done;
+	void *fence;
+	struct {
+		int max;
+		int free;
+		int cur;
+		int put;
+		int ib_base;
+		int ib_max;
+		int ib_free;
+		int ib_put;
+	} dma;
+	u32 user_get_hi;
+	u32 user_get;
+	u32 user_put;
+
+	struct nouveau_object *object;
+};
+
+
+int  nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
+			 u32 parent, u32 handle, u32 arg0, u32 arg1,
+			 struct nouveau_channel **);
+void nouveau_channel_del(struct nouveau_channel **);
+int  nouveau_channel_idle(struct nouveau_channel *);
+
+#endif
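The dma bookkeeping declared above is seeded by nouveau_channel_init() in the new nouveau_chan.c. A standalone sketch of those initial values, with the constants copied from that switch statement and nothing else assumed:

#include <stdio.h>

int main(void)
{
	/* NV03/NV10-style DMA channels (object class low byte 0x6b/0x6e) */
	int dma_max = (0x10000 / 4) - 2;	/* 16382 dwords usable        */

	/* everything else: indirect (IB) ring placed after the pushbuf   */
	int ib_base = 0x10000 / 4;		/* ring starts at dword 16384 */
	int ib_max  = (0x02000 / 8) - 1;	/* 1023 indirect entries      */
	int ib_put  = 0;
	int ib_free = ib_max - ib_put;

	printf("dma: max=%d\n", dma_max);
	printf("ib:  base=%d max=%d free=%d\n", ib_base, ib_max, ib_free);
	return 0;
}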
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
deleted file mode 100644
index cd180c678c13..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * Copyright 2005-2006 Stephane Marchesin
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nouveau_dma.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
-
-static int
-nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
-{
-	u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	/* allocate buffer object */
-	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
-	if (ret)
-		goto out;
-
-	ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
-	if (ret)
-		goto out;
-
-	ret = nouveau_bo_map(chan->pushbuf_bo);
-	if (ret)
-		goto out;
-
-	/* create DMA object covering the entire memtype where the push
-	 * buffer resides, userspace can submit its own push buffers from
-	 * anywhere within the same memtype.
-	 */
-	chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
-	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
-					 &chan->pushbuf_vma);
-		if (ret)
-			goto out;
-
-		if (dev_priv->card_type < NV_C0) {
-			ret = nouveau_gpuobj_dma_new(chan,
-						     NV_CLASS_DMA_IN_MEMORY, 0,
-						     (1ULL << 40),
-						     NV_MEM_ACCESS_RO,
-						     NV_MEM_TARGET_VM,
-						     &chan->pushbuf);
-		}
-		chan->pushbuf_base = chan->pushbuf_vma.offset;
-	} else
-	if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-					     dev_priv->gart_info.aper_size,
-					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_GART,
-					     &chan->pushbuf);
-	} else
-	if (dev_priv->card_type != NV_04) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-					     dev_priv->fb_available_size,
-					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_VRAM,
-					     &chan->pushbuf);
-	} else {
-		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
-		 * exact reason for existing :)  PCI access to cmdbuf in
-		 * VRAM.
-		 */
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     pci_resource_start(dev->pdev, 1),
-					     dev_priv->fb_available_size,
-					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_PCI,
-					     &chan->pushbuf);
-	}
-
-out:
-	if (ret) {
-		NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
-		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
-		nouveau_gpuobj_ref(NULL, &chan->pushbuf);
-		if (chan->pushbuf_bo) {
-			nouveau_bo_unmap(chan->pushbuf_bo);
-			nouveau_bo_ref(NULL, &chan->pushbuf_bo);
-		}
-	}
-
-	return 0;
-}
-
-/* allocates and initializes a fifo for user space consumption */
-int
-nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
-		      struct drm_file *file_priv,
-		      uint32_t vram_handle, uint32_t gart_handle)
-{
-	struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
-	struct nouveau_channel *chan;
-	unsigned long flags;
-	int ret, i;
-
-	/* allocate and lock channel structure */
-	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
-	if (!chan)
-		return -ENOMEM;
-	chan->dev = dev;
-	chan->file_priv = file_priv;
-	chan->vram_handle = vram_handle;
-	chan->gart_handle = gart_handle;
-
-	kref_init(&chan->ref);
-	atomic_set(&chan->users, 1);
-	mutex_init(&chan->mutex);
-	mutex_lock(&chan->mutex);
-
-	/* allocate hw channel id */
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
-		if (!dev_priv->channels.ptr[chan->id]) {
-			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
-	if (chan->id == pfifo->channels) {
-		mutex_unlock(&chan->mutex);
-		kfree(chan);
-		return -ENODEV;
-	}
-
-	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
-
-	/* setup channel's memory and vm */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
-	if (ret) {
-		NV_ERROR(dev, "gpuobj %d\n", ret);
-		nouveau_channel_put(&chan);
-		return ret;
-	}
-
-	/* Allocate space for per-channel fixed notifier memory */
-	ret = nouveau_notifier_init_channel(chan);
-	if (ret) {
-		NV_ERROR(dev, "ntfy %d\n", ret);
-		nouveau_channel_put(&chan);
-		return ret;
-	}
-
-	/* Allocate DMA push buffer */
-	ret = nouveau_channel_pushbuf_init(chan);
-	if (ret) {
-		NV_ERROR(dev, "pushbuf %d\n", ret);
-		nouveau_channel_put(&chan);
-		return ret;
-	}
-
-	nouveau_dma_init(chan);
-	chan->user_put = 0x40;
-	chan->user_get = 0x44;
-	if (dev_priv->card_type >= NV_50)
-		chan->user_get_hi = 0x60;
-
-	/* create fifo context */
-	ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
-	if (ret) {
-		nouveau_channel_put(&chan);
-		return ret;
-	}
-
-	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
-	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
-	if (ret) {
-		nouveau_channel_put(&chan);
-		return ret;
-	}
-
-	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-		OUT_RING  (chan, 0x00000000);
-
-	ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
-	if (ret) {
-		nouveau_channel_put(&chan);
-		return ret;
-	}
-
-	if (dev_priv->card_type < NV_C0) {
-		ret = RING_SPACE(chan, 2);
-		if (ret) {
-			nouveau_channel_put(&chan);
-			return ret;
-		}
-
-		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
-		OUT_RING  (chan, NvSw);
-		FIRE_RING (chan);
-	}
-
-	FIRE_RING(chan);
-
-	ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
-	if (ret) {
-		nouveau_channel_put(&chan);
-		return ret;
-	}
-
-	nouveau_debugfs_channel_init(chan);
-
-	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
-	if (fpriv) {
-		spin_lock(&fpriv->lock);
-		list_add(&chan->list, &fpriv->channels);
-		spin_unlock(&fpriv->lock);
-	}
-	*chan_ret = chan;
-	return 0;
-}
-
-struct nouveau_channel *
-nouveau_channel_get_unlocked(struct nouveau_channel *ref)
-{
-	struct nouveau_channel *chan = NULL;
-
-	if (likely(ref && atomic_inc_not_zero(&ref->users)))
-		nouveau_channel_ref(ref, &chan);
-
-	return chan;
-}
-
-struct nouveau_channel *
-nouveau_channel_get(struct drm_file *file_priv, int id)
-{
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
-	struct nouveau_channel *chan;
-
-	spin_lock(&fpriv->lock);
-	list_for_each_entry(chan, &fpriv->channels, list) {
-		if (chan->id == id) {
-			chan = nouveau_channel_get_unlocked(chan);
-			spin_unlock(&fpriv->lock);
-			mutex_lock(&chan->mutex);
-			return chan;
-		}
-	}
-	spin_unlock(&fpriv->lock);
-
-	return ERR_PTR(-EINVAL);
-}
-
-void
-nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
-{
-	struct nouveau_channel *chan = *pchan;
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-	int i;
-
-	/* decrement the refcount, and we're done if there's still refs */
-	if (likely(!atomic_dec_and_test(&chan->users))) {
-		nouveau_channel_ref(NULL, pchan);
-		return;
-	}
-
-	/* no one wants the channel anymore */
-	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
-	nouveau_debugfs_channel_fini(chan);
-
-	/* give it chance to idle */
-	nouveau_channel_idle(chan);
-
-	/* destroy the engine specific contexts */
-	for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
-		if (chan->engctx[i])
-			dev_priv->eng[i]->context_del(chan, i);
-	}
-
-	/* aside from its resources, the channel should now be dead,
-	 * remove it from the channel list
-	 */
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
-	/* destroy any resources the channel owned */
-	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
-	if (chan->pushbuf_bo) {
-		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
-		nouveau_bo_unmap(chan->pushbuf_bo);
-		nouveau_bo_unpin(chan->pushbuf_bo);
-		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
-	}
-	nouveau_ramht_ref(NULL, &chan->ramht, chan);
-	nouveau_notifier_takedown_channel(chan);
-	nouveau_gpuobj_channel_takedown(chan);
-
-	nouveau_channel_ref(NULL, pchan);
-}
-
-void
-nouveau_channel_put(struct nouveau_channel **pchan)
-{
-	mutex_unlock(&(*pchan)->mutex);
-	nouveau_channel_put_unlocked(pchan);
-}
-
-static void
-nouveau_channel_del(struct kref *ref)
-{
-	struct nouveau_channel *chan =
-		container_of(ref, struct nouveau_channel, ref);
-
-	kfree(chan);
-}
-
-void
-nouveau_channel_ref(struct nouveau_channel *chan,
-		    struct nouveau_channel **pchan)
-{
-	if (chan)
-		kref_get(&chan->ref);
-
-	if (*pchan)
-		kref_put(&(*pchan)->ref, nouveau_channel_del);
-
-	*pchan = chan;
-}
-
-int
-nouveau_channel_idle(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_fence *fence = NULL;
-	int ret;
-
-	ret = nouveau_fence_new(chan, &fence);
-	if (!ret) {
-		ret = nouveau_fence_wait(fence, false, false);
-		nouveau_fence_unref(&fence);
-	}
-
-	if (ret)
-		NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
-	return ret;
-}
-
-/* cleans up all the fifos from file_priv */
-void
-nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct nouveau_channel *chan;
-	int i;
-
-	if (!pfifo)
-		return;
-
-	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
-	for (i = 0; i < pfifo->channels; i++) {
-		chan = nouveau_channel_get(file_priv, i);
-		if (IS_ERR(chan))
-			continue;
-
-		list_del(&chan->list);
-		atomic_dec(&chan->users);
-		nouveau_channel_put(&chan);
-	}
-}
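The deleted file looked channels up under fpriv->lock and then took a reference with atomic_inc_not_zero(), so a channel whose last user had already dropped it could not be revived. A compact user-space sketch of that idiom, detached from the nouveau types; the struct and helper names are invented, and the kernel primitive is only mimicked here, not reimplemented verbatim.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int users;	/* may the object still be looked up? */
};

/* Mirrors the idea behind nouveau_channel_get_unlocked(): only take a
 * reference if the count has not already reached zero.
 */
static bool object_get(struct object *obj)
{
	int old = atomic_load(&obj->users);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->users, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object is dying, caller must not use it */
}

int main(void)
{
	struct object live = { .users = 1 };
	struct object dead = { .users = 0 };

	printf("live: %d, dead: %d\n", object_get(&live), object_get(&dead));
	return 0;
}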
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index abb92de98573..9a6e2cb282dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -31,12 +31,29 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "nouveau_reg.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_hw.h"
+#include "nouveau_acpi.h"
+
+#include "nouveau_display.h"
+#include "nouveau_connector.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
-#include "nouveau_connector.h"
-#include "nouveau_gpio.h"
-#include "nouveau_hw.h"
+
+#include <subdev/i2c.h>
+#include <subdev/gpio.h>
+
+MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
+static int nouveau_tv_disable = 0;
+module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
+
+MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
+static int nouveau_ignorelid = 0;
+module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
+MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
+static int nouveau_duallink = 1;
+module_param_named(duallink, nouveau_duallink, int, 0400);
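These remain ordinary module parameters, so they can still be set at load time, for example with an "options nouveau tv_disable=1 duallink=0" line in modprobe.d, or as nouveau.tv_disable=1 on the kernel command line when the driver is built in; the 0400 permission exposes them read-only under /sys/module/nouveau/parameters/ once loaded.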
 
 static void nouveau_connector_hotplug(void *, int);
 
@@ -58,7 +75,7 @@ find_encoder(struct drm_connector *connector, int type)
 			continue;
 		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
 
-		if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
+		if (type == DCB_OUTPUT_ANY || nv_encoder->dcb->type == type)
 			return nv_encoder;
 	}
 
@@ -83,19 +100,21 @@ static void
 nouveau_connector_destroy(struct drm_connector *connector)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
-	struct drm_nouveau_private *dev_priv;
+	struct nouveau_gpio *gpio;
+	struct nouveau_drm *drm;
 	struct drm_device *dev;
 
 	if (!nv_connector)
 		return;
 
-	dev = nv_connector->base.dev;
-	dev_priv = dev->dev_private;
-	NV_DEBUG_KMS(dev, "\n");
+	dev  = nv_connector->base.dev;
+	drm  = nouveau_drm(dev);
+	gpio = nouveau_gpio(drm->device);
+	NV_DEBUG(drm, "\n");
 
-	if (nv_connector->hpd != DCB_GPIO_UNUSED) {
-		nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
-				     nouveau_connector_hotplug, connector);
+	if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
+		gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
+			      nouveau_connector_hotplug, connector);
 	}
 
 	kfree(nv_connector->edid);
@@ -104,15 +123,17 @@ nouveau_connector_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
-static struct nouveau_i2c_chan *
+static struct nouveau_i2c_port *
 nouveau_connector_ddc_detect(struct drm_connector *connector,
 			     struct nouveau_encoder **pnv_encoder)
 {
 	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
 	int i;
 
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		struct nouveau_i2c_chan *i2c = NULL;
+		struct nouveau_i2c_port *port = NULL;
 		struct nouveau_encoder *nv_encoder;
 		struct drm_mode_object *obj;
 		int id;
@@ -127,11 +148,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
 		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
 
 		if (nv_encoder->dcb->i2c_index < 0xf)
-			i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
-
-		if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
+			port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+		if (port && nv_probe_i2c(port, 0x50)) {
 			*pnv_encoder = nv_encoder;
-			return i2c;
+			return port;
 		}
 	}
 
@@ -148,8 +168,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
 	struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
 
 	if (!dn ||
-	    !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) ||
-	      (nv_encoder = find_encoder(connector, OUTPUT_ANALOG))))
+	    !((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
+	      (nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG))))
 		return NULL;
 
 	for_each_child_of_node(dn, cn) {
@@ -173,25 +193,25 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 			      struct nouveau_encoder *nv_encoder)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
-	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct drm_device *dev = connector->dev;
 
 	if (nv_connector->detected_encoder == nv_encoder)
 		return;
 	nv_connector->detected_encoder = nv_encoder;
 
-	if (dev_priv->card_type >= NV_50) {
+	if (nv_device(drm->device)->card_type >= NV_50) {
 		connector->interlace_allowed = true;
 		connector->doublescan_allowed = true;
 	} else
-	if (nv_encoder->dcb->type == OUTPUT_LVDS ||
-	    nv_encoder->dcb->type == OUTPUT_TMDS) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
+	    nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
 		connector->doublescan_allowed = false;
 		connector->interlace_allowed = false;
 	} else {
 		connector->doublescan_allowed = true;
-		if (dev_priv->card_type == NV_20 ||
-		   (dev_priv->card_type == NV_10 &&
+		if (nv_device(drm->device)->card_type == NV_20 ||
+		   (nv_device(drm->device)->card_type == NV_10 &&
 		    (dev->pci_device & 0x0ff0) != 0x0100 &&
 		    (dev->pci_device & 0x0ff0) != 0x0150))
 			/* HW is broken */
@@ -203,7 +223,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 	if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
 		drm_connector_property_set_value(connector,
 			dev->mode_config.dvi_i_subconnector_property,
-			nv_encoder->dcb->type == OUTPUT_TMDS ?
+			nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
 			DRM_MODE_SUBCONNECTOR_DVID :
 			DRM_MODE_SUBCONNECTOR_DVIA);
 	}
@@ -213,10 +233,11 @@ static enum drm_connector_status
 nouveau_connector_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = NULL;
 	struct nouveau_encoder *nv_partner;
-	struct nouveau_i2c_chan *i2c;
+	struct nouveau_i2c_port *i2c;
 	int type;
 
 	/* Cleanup the previous EDID block. */
@@ -232,14 +253,14 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 		drm_mode_connector_update_edid_property(connector,
 							nv_connector->edid);
 		if (!nv_connector->edid) {
-			NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
+			NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
 				 drm_get_connector_name(connector));
 			goto detect_analog;
 		}
 
-		if (nv_encoder->dcb->type == OUTPUT_DP &&
+		if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
 		    !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
-			NV_ERROR(dev, "Detected %s, but failed init\n",
+			NV_ERROR(drm, "Detected %s, but failed init\n",
 				 drm_get_connector_name(connector));
 			return connector_status_disconnected;
 		}
@@ -250,19 +271,19 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 		 * isn't necessarily correct.
 		 */
 		nv_partner = NULL;
-		if (nv_encoder->dcb->type == OUTPUT_TMDS)
-			nv_partner = find_encoder(connector, OUTPUT_ANALOG);
-		if (nv_encoder->dcb->type == OUTPUT_ANALOG)
-			nv_partner = find_encoder(connector, OUTPUT_TMDS);
-
-		if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG &&
-				    nv_partner->dcb->type == OUTPUT_TMDS) ||
-				   (nv_encoder->dcb->type == OUTPUT_TMDS &&
-				    nv_partner->dcb->type == OUTPUT_ANALOG))) {
+		if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
+			nv_partner = find_encoder(connector, DCB_OUTPUT_ANALOG);
+		if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
+			nv_partner = find_encoder(connector, DCB_OUTPUT_TMDS);
+
+		if (nv_partner && ((nv_encoder->dcb->type == DCB_OUTPUT_ANALOG &&
+				    nv_partner->dcb->type == DCB_OUTPUT_TMDS) ||
+				   (nv_encoder->dcb->type == DCB_OUTPUT_TMDS &&
+				    nv_partner->dcb->type == DCB_OUTPUT_ANALOG))) {
 			if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
-				type = OUTPUT_TMDS;
+				type = DCB_OUTPUT_TMDS;
 			else
-				type = OUTPUT_ANALOG;
+				type = DCB_OUTPUT_ANALOG;
 
 			nv_encoder = find_encoder(connector, type);
 		}
@@ -278,9 +299,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 	}
 
 detect_analog:
-	nv_encoder = find_encoder(connector, OUTPUT_ANALOG);
+	nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG);
 	if (!nv_encoder && !nouveau_tv_disable)
-		nv_encoder = find_encoder(connector, OUTPUT_TV);
+		nv_encoder = find_encoder(connector, DCB_OUTPUT_TV);
 	if (nv_encoder && force) {
 		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
 		struct drm_encoder_helper_funcs *helper =
@@ -301,7 +322,7 @@ static enum drm_connector_status
 nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = NULL;
 	enum drm_connector_status status = connector_status_disconnected;
@@ -313,12 +334,12 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 		nv_connector->edid = NULL;
 	}
 
-	nv_encoder = find_encoder(connector, OUTPUT_LVDS);
+	nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
 	if (!nv_encoder)
 		return connector_status_disconnected;
 
 	/* Try retrieving EDID via DDC */
-	if (!dev_priv->vbios.fp_no_ddc) {
+	if (!drm->vbios.fp_no_ddc) {
 		status = nouveau_connector_detect(connector, force);
 		if (status == connector_status_connected)
 			goto out;
@@ -334,7 +355,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 	 * valid - it's not (rh#613284)
 	 */
 	if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
-		if (!nouveau_acpi_edid(dev, connector)) {
+		if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
 			status = connector_status_connected;
 			goto out;
 		}
@@ -344,7 +365,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 	 * modeline is available for the panel, set it as the panel's
 	 * native mode and exit.
 	 */
-	if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc ||
+	if (nouveau_bios_fp_mode(dev, NULL) && (drm->vbios.fp_no_ddc ||
 	    nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
 		status = connector_status_connected;
 		goto out;
@@ -353,7 +374,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 	/* Still nothing, some VBIOS images have a hardcoded EDID block
 	 * stored in them for the panel.
 	 */
-	if (!dev_priv->vbios.fp_no_ddc) {
+	if (!drm->vbios.fp_no_ddc) {
 		struct edid *edid =
 			(struct edid *)nouveau_bios_embedded_edid(dev);
 		if (edid) {
@@ -379,21 +400,22 @@ out:
 static void
 nouveau_connector_force(struct drm_connector *connector)
 {
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder;
 	int type;
 
 	if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
 		if (connector->force == DRM_FORCE_ON_DIGITAL)
-			type = OUTPUT_TMDS;
+			type = DCB_OUTPUT_TMDS;
 		else
-			type = OUTPUT_ANALOG;
+			type = DCB_OUTPUT_ANALOG;
 	} else
-		type = OUTPUT_ANY;
+		type = DCB_OUTPUT_ANY;
 
 	nv_encoder = find_encoder(connector, type);
 	if (!nv_encoder) {
-		NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
+		NV_ERROR(drm, "can't find encoder to force %s on!\n",
 			 drm_get_connector_name(connector));
 		connector->status = connector_status_disconnected;
 		return;
@@ -406,8 +428,7 @@ static int
 nouveau_connector_set_property(struct drm_connector *connector,
 			       struct drm_property *property, uint64_t value)
 {
-	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
-	struct nouveau_display_engine *disp = &dev_priv->engine.display;
+	struct nouveau_display *disp = nouveau_display(connector->dev);
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
 	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -532,7 +553,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
 		}
 	}
 
-	if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
+	if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
 		return get_slave_funcs(encoder)->set_property(
 			encoder, connector, property, value);
 
@@ -543,6 +564,7 @@ static struct drm_display_mode *
 nouveau_connector_native_mode(struct drm_connector *connector)
 {
 	struct drm_connector_helper_funcs *helper = connector->helper_private;
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode, *largest = NULL;
@@ -556,7 +578,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
 
 		/* Use preferred mode if there is one.. */
 		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
-			NV_DEBUG_KMS(dev, "native mode from preferred\n");
+			NV_DEBUG(drm, "native mode from preferred\n");
 			return drm_mode_duplicate(dev, mode);
 		}
 
@@ -579,7 +601,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
 		largest = mode;
 	}
 
-	NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
+	NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
 		      high_w, high_h, high_v);
 	return largest ? drm_mode_duplicate(dev, largest) : NULL;
 }
@@ -643,10 +665,10 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
 static void
 nouveau_connector_detect_depth(struct drm_connector *connector)
 {
-	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nvbios *bios = &drm->vbios;
 	struct drm_display_mode *mode = nv_connector->native_mode;
 	bool duallink;
 
@@ -661,7 +683,7 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
 	}
 
 	/* we're out of options unless we're LVDS, default to 8bpc */
-	if (nv_encoder->dcb->type != OUTPUT_LVDS) {
+	if (nv_encoder->dcb->type != DCB_OUTPUT_LVDS) {
 		connector->display_info.bpc = 8;
 		return;
 	}
@@ -693,7 +715,7 @@ static int
 nouveau_connector_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
 	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -709,9 +731,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 	if (nv_connector->edid)
 		ret = drm_add_edid_modes(connector, nv_connector->edid);
 	else
-	if (nv_encoder->dcb->type == OUTPUT_LVDS &&
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
 	    (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
-	     dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
+	     drm->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
 		struct drm_display_mode mode;
 
 		nouveau_bios_fp_mode(dev, &mode);
@@ -746,7 +768,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
 		nouveau_connector_detect_depth(connector);
 
-	if (nv_encoder->dcb->type == OUTPUT_TV)
+	if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
 		ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
 
 	if (nv_connector->type == DCB_CONNECTOR_LVDS ||
@@ -761,15 +783,15 @@ static unsigned
 get_tmds_link_bandwidth(struct drm_connector *connector)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
-	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
-	struct dcb_entry *dcb = nv_connector->detected_encoder->dcb;
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
 
 	if (dcb->location != DCB_LOC_ON_CHIP ||
-	    dev_priv->chipset >= 0x46)
+	    nv_device(drm->device)->chipset >= 0x46)
 		return 165000;
-	else if (dev_priv->chipset >= 0x40)
+	else if (nv_device(drm->device)->chipset >= 0x40)
 		return 155000;
-	else if (dev_priv->chipset >= 0x18)
+	else if (nv_device(drm->device)->chipset >= 0x18)
 		return 135000;
 	else
 		return 112000;
@@ -786,7 +808,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
 	unsigned clock = mode->clock;
 
 	switch (nv_encoder->dcb->type) {
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		if (nv_connector->native_mode &&
 		    (mode->hdisplay > nv_connector->native_mode->hdisplay ||
 		     mode->vdisplay > nv_connector->native_mode->vdisplay))
@@ -795,19 +817,19 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
 		min_clock = 0;
 		max_clock = 400000;
 		break;
-	case OUTPUT_TMDS:
+	case DCB_OUTPUT_TMDS:
 		max_clock = get_tmds_link_bandwidth(connector);
 		if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
 			max_clock *= 2;
 		break;
-	case OUTPUT_ANALOG:
+	case DCB_OUTPUT_ANALOG:
 		max_clock = nv_encoder->dcb->crtconf.maxfreq;
 		if (!max_clock)
 			max_clock = 350000;
 		break;
-	case OUTPUT_TV:
+	case DCB_OUTPUT_TV:
 		return get_slave_funcs(encoder)->mode_valid(encoder, mode);
-	case OUTPUT_DP:
+	case DCB_OUTPUT_DP:
 		max_clock  = nv_encoder->dp.link_nr;
 		max_clock *= nv_encoder->dp.link_bw;
 		clock = clock * (connector->display_info.bpc * 3) / 10;
@@ -899,14 +921,15 @@ struct drm_connector *
 nouveau_connector_create(struct drm_device *dev, int index)
 {
 	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_display_engine *disp = &dev_priv->engine.display;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	struct nouveau_display *disp = nouveau_display(dev);
 	struct nouveau_connector *nv_connector = NULL;
 	struct drm_connector *connector;
 	int type, ret = 0;
 	bool dummy;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		nv_connector = nouveau_connector(connector);
@@ -922,7 +945,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	nv_connector->index = index;
 
 	/* attempt to parse vbios connector type and hotplug gpio */
-	nv_connector->dcb = dcb_conn(dev, index);
+	nv_connector->dcb = olddcb_conn(dev, index);
 	if (nv_connector->dcb) {
 		static const u8 hpd[16] = {
 			0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
@@ -930,7 +953,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		};
 
 		u32 entry = ROM16(nv_connector->dcb[0]);
-		if (dcb_conntab(dev)[3] >= 4)
+		if (olddcb_conntab(dev)[3] >= 4)
 			entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
 
 		nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
@@ -939,7 +962,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		nv_connector->type = nv_connector->dcb[0];
 		if (drm_conntype_from_dcb(nv_connector->type) ==
 					  DRM_MODE_CONNECTOR_Unknown) {
-			NV_WARN(dev, "unknown connector type %02x\n",
+			NV_WARN(drm, "unknown connector type %02x\n",
 				nv_connector->type);
 			nv_connector->type = DCB_CONNECTOR_NONE;
 		}
@@ -964,8 +987,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	 * figure out something suitable ourselves
 	 */
 	if (nv_connector->type == DCB_CONNECTOR_NONE) {
-		struct drm_nouveau_private *dev_priv = dev->dev_private;
-		struct dcb_table *dcbt = &dev_priv->vbios.dcb;
+		struct nouveau_drm *drm = nouveau_drm(dev);
+		struct dcb_table *dcbt = &drm->vbios.dcb;
 		u32 encoders = 0;
 		int i;
 
@@ -974,25 +997,25 @@ nouveau_connector_create(struct drm_device *dev, int index)
 				encoders |= (1 << dcbt->entry[i].type);
 		}
 
-		if (encoders & (1 << OUTPUT_DP)) {
-			if (encoders & (1 << OUTPUT_TMDS))
+		if (encoders & (1 << DCB_OUTPUT_DP)) {
+			if (encoders & (1 << DCB_OUTPUT_TMDS))
 				nv_connector->type = DCB_CONNECTOR_DP;
 			else
 				nv_connector->type = DCB_CONNECTOR_eDP;
 		} else
-		if (encoders & (1 << OUTPUT_TMDS)) {
-			if (encoders & (1 << OUTPUT_ANALOG))
+		if (encoders & (1 << DCB_OUTPUT_TMDS)) {
+			if (encoders & (1 << DCB_OUTPUT_ANALOG))
 				nv_connector->type = DCB_CONNECTOR_DVI_I;
 			else
 				nv_connector->type = DCB_CONNECTOR_DVI_D;
 		} else
-		if (encoders & (1 << OUTPUT_ANALOG)) {
+		if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
 			nv_connector->type = DCB_CONNECTOR_VGA;
 		} else
-		if (encoders & (1 << OUTPUT_LVDS)) {
+		if (encoders & (1 << DCB_OUTPUT_LVDS)) {
 			nv_connector->type = DCB_CONNECTOR_LVDS;
 		} else
-		if (encoders & (1 << OUTPUT_TV)) {
+		if (encoders & (1 << DCB_OUTPUT_TV)) {
 			nv_connector->type = DCB_CONNECTOR_TV_0;
 		}
 	}
@@ -1001,7 +1024,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	if (type == DRM_MODE_CONNECTOR_LVDS) {
 		ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
 		if (ret) {
-			NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
+			NV_ERROR(drm, "Error parsing LVDS table, disabling\n");
 			kfree(nv_connector);
 			return ERR_PTR(ret);
 		}
@@ -1051,7 +1074,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 
 	switch (nv_connector->type) {
 	case DCB_CONNECTOR_VGA:
-		if (dev_priv->card_type >= NV_50) {
+		if (nv_device(drm->device)->card_type >= NV_50) {
 			drm_connector_attach_property(connector,
 					dev->mode_config.scaling_mode_property,
 					nv_connector->scaling_mode);
@@ -1084,10 +1107,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	}
 
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-	if (nv_connector->hpd != DCB_GPIO_UNUSED) {
-		ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
-					   nouveau_connector_hotplug,
-					   connector);
+	if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
+		ret = gpio->isr_add(gpio, 0, nv_connector->hpd, 0xff,
+				    nouveau_connector_hotplug, connector);
 		if (ret == 0)
 			connector->polled = DRM_CONNECTOR_POLL_HPD;
 	}
@@ -1101,8 +1123,9 @@ nouveau_connector_hotplug(void *data, int plugged)
 {
 	struct drm_connector *connector = data;
 	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	NV_DEBUG(dev, "%splugged %s\n", plugged ? "" : "un",
+	NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
 		 drm_get_connector_name(connector));
 
 	if (plugged)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index e1c1567c0c1a..ebdb87670a8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,7 +28,8 @@
 #define __NOUVEAU_CONNECTOR_H__
 
 #include <drm/drm_edid.h>
-#include "nouveau_i2c.h"
+
+struct nouveau_i2c_port;
 
 enum nouveau_underscan_type {
 	UNDERSCAN_OFF,
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
deleted file mode 100644
index f68cb5e71893..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/*
- * Authors:
- *  Ben Skeggs <bskeggs@redhat.com>
- */
-
-#include <linux/debugfs.h>
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-
-#include <ttm/ttm_page_alloc.h>
-
-static int
-nouveau_debugfs_channel_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct nouveau_channel *chan = node->info_ent->data;
-
-	seq_printf(m, "channel id    : %d\n", chan->id);
-
-	seq_printf(m, "cpu fifo state:\n");
-	seq_printf(m, "          base: 0x%10llx\n", chan->pushbuf_base);
-	seq_printf(m, "           max: 0x%08x\n", chan->dma.max << 2);
-	seq_printf(m, "           cur: 0x%08x\n", chan->dma.cur << 2);
-	seq_printf(m, "           put: 0x%08x\n", chan->dma.put << 2);
-	seq_printf(m, "          free: 0x%08x\n", chan->dma.free << 2);
-	if (chan->dma.ib_max) {
-		seq_printf(m, "        ib max: 0x%08x\n", chan->dma.ib_max);
-		seq_printf(m, "        ib put: 0x%08x\n", chan->dma.ib_put);
-		seq_printf(m, "       ib free: 0x%08x\n", chan->dma.ib_free);
-	}
-
-	seq_printf(m, "gpu fifo state:\n");
-	seq_printf(m, "           get: 0x%08x\n",
-					nvchan_rd32(chan, chan->user_get));
-	seq_printf(m, "           put: 0x%08x\n",
-					nvchan_rd32(chan, chan->user_put));
-	if (chan->dma.ib_max) {
-		seq_printf(m, "        ib get: 0x%08x\n",
-			   nvchan_rd32(chan, 0x88));
-		seq_printf(m, "        ib put: 0x%08x\n",
-			   nvchan_rd32(chan, 0x8c));
-	}
-
-	return 0;
-}
-
-int
-nouveau_debugfs_channel_init(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct drm_minor *minor = chan->dev->primary;
-	int ret;
-
-	if (!dev_priv->debugfs.channel_root) {
-		dev_priv->debugfs.channel_root =
-			debugfs_create_dir("channel", minor->debugfs_root);
-		if (!dev_priv->debugfs.channel_root)
-			return -ENOENT;
-	}
-
-	snprintf(chan->debugfs.name, 32, "%d", chan->id);
-	chan->debugfs.info.name = chan->debugfs.name;
-	chan->debugfs.info.show = nouveau_debugfs_channel_info;
-	chan->debugfs.info.driver_features = 0;
-	chan->debugfs.info.data = chan;
-
-	ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
-				       dev_priv->debugfs.channel_root,
-				       chan->dev->primary);
-	if (ret == 0)
-		chan->debugfs.active = true;
-	return ret;
-}
-
-void
-nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-
-	if (!chan->debugfs.active)
-		return;
-
-	drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
-	chan->debugfs.active = false;
-
-	if (chan == dev_priv->channel) {
-		debugfs_remove(dev_priv->debugfs.channel_root);
-		dev_priv->debugfs.channel_root = NULL;
-	}
-}
-
-static int
-nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_minor *minor = node->minor;
-	struct drm_device *dev = minor->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t ppci_0;
-
-	ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
-
-	seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
-	seq_printf(m, "PCI ID    : 0x%04x:0x%04x\n",
-		   ppci_0 & 0xffff, ppci_0 >> 16);
-	return 0;
-}
-
-static int
-nouveau_debugfs_memory_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_minor *minor = node->minor;
-	struct drm_nouveau_private *dev_priv = minor->dev->dev_private;
-
-	seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10));
-	return 0;
-}
-
-static int
-nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
-	int i;
-
-	for (i = 0; i < dev_priv->vbios.length; i++)
-		seq_printf(m, "%c", dev_priv->vbios.data[i]);
-	return 0;
-}
-
-static int
-nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
-	int ret;
-
-	ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
-	if (ret)
-		seq_printf(m, "failed: %d", ret);
-	else
-		seq_printf(m, "succeeded\n");
-	return 0;
-}
-
-static struct drm_info_list nouveau_debugfs_list[] = {
-	{ "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
-	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
-	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
-	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
-	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
-	{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
-};
-#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
-
-int
-nouveau_debugfs_init(struct drm_minor *minor)
-{
-	drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
-				 minor->debugfs_root, minor);
-	return 0;
-}
-
-void
-nouveau_debugfs_takedown(struct drm_minor *minor)
-{
-	drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
-				 minor);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e4eeeaf20fdf..8f98e5a8c488 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,18 +26,21 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include "nouveau_drv.h"
-#include "nouveau_fb.h"
+
 #include "nouveau_fbcon.h"
 #include "nouveau_hw.h"
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
+#include "nouveau_gem.h"
 #include "nouveau_connector.h"
-#include "nouveau_software.h"
-#include "nouveau_gpio.h"
-#include "nouveau_fence.h"
 #include "nv50_display.h"
 
+#include "nouveau_fence.h"
+
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+#include <engine/disp.h>
+
 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 {
@@ -71,7 +74,7 @@ nouveau_framebuffer_init(struct drm_device *dev,
 			 struct drm_mode_fb_cmd2 *mode_cmd,
 			 struct nouveau_bo *nvbo)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_framebuffer *fb = &nv_fb->base;
 	int ret;
 
@@ -83,7 +86,7 @@ nouveau_framebuffer_init(struct drm_device *dev,
 	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
 	nv_fb->nvbo = nvbo;
 
-	if (dev_priv->card_type >= NV_50) {
+	if (nv_device(drm->device)->card_type >= NV_50) {
 		u32 tile_flags = nouveau_bo_tile_layout(nvbo);
 		if (tile_flags == 0x7a00 ||
 		    tile_flags == 0xfe00)
@@ -102,21 +105,21 @@ nouveau_framebuffer_init(struct drm_device *dev,
 		case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
 		case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
 		default:
-			 NV_ERROR(dev, "unknown depth %d\n", fb->depth);
+			 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
 			 return -EINVAL;
 		}
 
-		if (dev_priv->chipset == 0x50)
+		if (nv_device(drm->device)->chipset == 0x50)
 			nv_fb->r_format |= (tile_flags << 8);
 
 		if (!tile_flags) {
-			if (dev_priv->card_type < NV_D0)
+			if (nv_device(drm->device)->card_type < NV_D0)
 				nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
 			else
 				nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
 		} else {
 			u32 mode = nvbo->tile_mode;
-			if (dev_priv->card_type >= NV_C0)
+			if (nv_device(drm->device)->card_type >= NV_C0)
 				mode >>= 4;
 			nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
 		}
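The tiled pitch packed just above is easier to follow with concrete numbers. A standalone illustration; the 1280-pixel XRGB8888 surface and tile_mode 0x10 are invented values, and only the packing itself mirrors the code (linear surfaces instead take the 0x00100000 | pitch or 0x01000000 | pitch form shown a few lines earlier):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical 1280-pixel-wide XRGB8888 scanout, tile_mode 0x10 */
	uint32_t pitch = 1280 * 4;		/* 5120 bytes per line        */
	uint32_t tile_mode = 0x10;
	uint32_t mode = tile_mode >> 4;		/* NV_C0 and later shift by 4 */
	uint32_t r_pitch = ((pitch / 4) << 4) | mode;

	printf("r_pitch = 0x%08x\n", (unsigned int)r_pitch);	/* 0x00005001 */
	return 0;
}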
@@ -212,8 +215,9 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
 int
 nouveau_display_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_display_engine *disp = &dev_priv->engine.display;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct drm_connector *connector;
 	int ret;
 
@@ -225,8 +229,8 @@ nouveau_display_init(struct drm_device *dev)
 	 * some vbios default this to off for some reason, causing the
 	 * panel to not work after resume
 	 */
-	if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) {
-		nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true);
+	if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
+		gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
 		msleep(300);
 	}
 
@@ -236,7 +240,8 @@ nouveau_display_init(struct drm_device *dev)
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
+		if (gpio)
+			gpio->irq(gpio, 0, conn->hpd, 0xff, true);
 	}
 
 	return ret;
@@ -245,35 +250,65 @@ nouveau_display_init(struct drm_device *dev)
 void
 nouveau_display_fini(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_display_engine *disp = &dev_priv->engine.display;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct drm_connector *connector;
 
 	/* disable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
+		if (gpio)
+			gpio->irq(gpio, 0, conn->hpd, 0xff, false);
 	}
 
 	drm_kms_helper_poll_disable(dev);
 	disp->fini(dev);
 }
 
+static void
+nouveau_display_vblank_notify(void *data, int crtc)
+{
+	drm_handle_vblank(data, crtc);
+}
+
+static void
+nouveau_display_vblank_get(void *data, int crtc)
+{
+	drm_vblank_get(data, crtc);
+}
+
+static void
+nouveau_display_vblank_put(void *data, int crtc)
+{
+	drm_vblank_put(data, crtc);
+}
+
 int
 nouveau_display_create(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_display_engine *disp = &dev_priv->engine.display;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+	struct nouveau_display *disp;
 	int ret, gen;
 
+	disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
+	if (!disp)
+		return -ENOMEM;
+
+	pdisp->vblank.data = dev;
+	pdisp->vblank.notify = nouveau_display_vblank_notify;
+	pdisp->vblank.get = nouveau_display_vblank_get;
+	pdisp->vblank.put = nouveau_display_vblank_put;
+
 	drm_mode_config_init(dev);
 	drm_mode_create_scaling_mode_property(dev);
 	drm_mode_create_dvi_i_properties(dev);
 
-	if (dev_priv->card_type < NV_50)
+	if (nv_device(drm->device)->card_type < NV_50)
 		gen = 0;
 	else
-	if (dev_priv->card_type < NV_D0)
+	if (nv_device(drm->device)->card_type < NV_D0)
 		gen = 1;
 	else
 		gen = 2;
@@ -307,11 +342,11 @@ nouveau_display_create(struct drm_device *dev)
 
 	dev->mode_config.min_width = 0;
 	dev->mode_config.min_height = 0;
-	if (dev_priv->card_type < NV_10) {
+	if (nv_device(drm->device)->card_type < NV_10) {
 		dev->mode_config.max_width = 2048;
 		dev->mode_config.max_height = 2048;
 	} else
-	if (dev_priv->card_type < NV_50) {
+	if (nv_device(drm->device)->card_type < NV_50) {
 		dev->mode_config.max_width = 4096;
 		dev->mode_config.max_height = 4096;
 	} else {
@@ -325,7 +360,13 @@ nouveau_display_create(struct drm_device *dev)
 	drm_kms_helper_poll_init(dev);
 	drm_kms_helper_poll_disable(dev);
 
-	ret = disp->create(dev);
+	if (nv_device(drm->device)->card_type < NV_50)
+		ret = nv04_display_create(dev);
+	else
+	if (nv_device(drm->device)->card_type < NV_D0)
+		ret = nv50_display_create(dev);
+	else
+		ret = nvd0_display_create(dev);
 	if (ret)
 		goto disp_create_err;
 
@@ -335,10 +376,11 @@ nouveau_display_create(struct drm_device *dev)
 			goto vblank_err;
 	}
 
+	nouveau_backlight_init(dev);
 	return 0;
 
 vblank_err:
-	disp->destroy(dev);
+	disp->dtor(dev);
 disp_create_err:
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
@@ -348,24 +390,109 @@ disp_create_err:
 void
 nouveau_display_destroy(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_display_engine *disp = &dev_priv->engine.display;
+	struct nouveau_display *disp = nouveau_display(dev);
 
+	nouveau_backlight_exit(dev);
 	drm_vblank_cleanup(dev);
 
-	disp->destroy(dev);
+	disp->dtor(dev);
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
+	nouveau_drm(dev)->display = NULL;
+	kfree(disp);
+}
+
+int
+nouveau_display_suspend(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_crtc *crtc;
+
+	nouveau_display_fini(dev);
+
+	NV_INFO(drm, "unpinning framebuffer(s)...\n");
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_framebuffer *nouveau_fb;
+
+		nouveau_fb = nouveau_framebuffer(crtc->fb);
+		if (!nouveau_fb || !nouveau_fb->nvbo)
+			continue;
+
+		nouveau_bo_unpin(nouveau_fb->nvbo);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+		nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+	}
+
+	return 0;
+}
+
+void
+nouveau_display_resume(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_crtc *crtc;
+	int ret;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_framebuffer *nouveau_fb;
+
+		nouveau_fb = nouveau_framebuffer(crtc->fb);
+		if (!nouveau_fb || !nouveau_fb->nvbo)
+			continue;
+
+		nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret)
+			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+		if (ret)
+			NV_ERROR(drm, "Could not pin/map cursor.\n");
+	}
+
+	nouveau_fbcon_set_suspend(dev, 0);
+	nouveau_fbcon_zfill_all(dev);
+
+	nouveau_display_init(dev);
+
+	/* Force CLUT to get re-loaded during modeset */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+		nv_crtc->lut.depth = 0;
+	}
+
+	drm_helper_resume_force_mode(dev);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+		u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+
+		nv_crtc->cursor.set_offset(nv_crtc, offset);
+		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+						 nv_crtc->cursor_saved_y);
+	}
 }
 
 int
 nouveau_vblank_enable(struct drm_device *dev, int crtc)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 
-	if (dev_priv->card_type >= NV_50)
-		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
+	if (device->card_type >= NV_D0)
+		nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 1);
+	else
+	if (device->card_type >= NV_50)
+		nv_mask(device, NV50_PDISPLAY_INTR_EN_1, 0,
 			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
 	else
 		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
@@ -377,10 +504,13 @@ nouveau_vblank_enable(struct drm_device *dev, int crtc)
 void
 nouveau_vblank_disable(struct drm_device *dev, int crtc)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 
-	if (dev_priv->card_type >= NV_50)
-		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
+	if (device->card_type >= NV_D0)
+		nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 0);
+	else
+	if (device->card_type >= NV_50)
+		nv_mask(device, NV50_PDISPLAY_INTR_EN_1,
 			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
 	else
 		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
@@ -434,15 +564,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 		       struct nouveau_page_flip_state *s,
 		       struct nouveau_fence **pfence)
 {
-	struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct drm_device *dev = chan->dev;
+	struct nouveau_fence_chan *fctx = chan->fence;
+	struct nouveau_drm *drm = chan->drm;
+	struct drm_device *dev = drm->dev;
 	unsigned long flags;
 	int ret;
 
 	/* Queue it to the pending list */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	list_add_tail(&s->head, &swch->flip);
+	list_add_tail(&s->head, &fctx->flip);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* Synchronize with the old framebuffer */
@@ -455,7 +585,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 	if (ret)
 		goto fail;
 
-	if (dev_priv->card_type < NV_C0) {
+	if (nv_device(drm->device)->card_type < NV_C0) {
 		BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
 		OUT_RING  (chan, 0x00000000);
 		OUT_RING  (chan, 0x00000000);
@@ -483,7 +613,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		       struct drm_pending_vblank_event *event)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
 	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
 	struct nouveau_page_flip_state *s;
@@ -491,7 +621,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct nouveau_fence *fence;
 	int ret;
 
-	if (!dev_priv->channel)
+	if (!drm->channel)
 		return -ENODEV;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
@@ -512,25 +642,25 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	/* Choose the channel the flip will be handled in */
 	fence = new_bo->bo.sync_obj;
 	if (fence)
-		chan = nouveau_channel_get_unlocked(fence->channel);
+		chan = fence->channel;
 	if (!chan)
-		chan = nouveau_channel_get_unlocked(dev_priv->channel);
-	mutex_lock(&chan->mutex);
+		chan = drm->channel;
+	mutex_lock(&chan->cli->mutex);
 
 	/* Emit a page flip */
-	if (dev_priv->card_type >= NV_50) {
-		if (dev_priv->card_type >= NV_D0)
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		if (nv_device(drm->device)->card_type >= NV_D0)
 			ret = nvd0_display_flip_next(crtc, fb, chan, 0);
 		else
 			ret = nv50_display_flip_next(crtc, fb, chan);
 		if (ret) {
-			nouveau_channel_put(&chan);
+			mutex_unlock(&chan->cli->mutex);
 			goto fail_unreserve;
 		}
 	}
 
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
-	nouveau_channel_put(&chan);
+	mutex_unlock(&chan->cli->mutex);
 	if (ret)
 		goto fail_unreserve;
 
@@ -552,20 +682,21 @@ int
 nouveau_finish_page_flip(struct nouveau_channel *chan,
 			 struct nouveau_page_flip_state *ps)
 {
-	struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
-	struct drm_device *dev = chan->dev;
+	struct nouveau_fence_chan *fctx = chan->fence;
+	struct nouveau_drm *drm = chan->drm;
+	struct drm_device *dev = drm->dev;
 	struct nouveau_page_flip_state *s;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
-	if (list_empty(&swch->flip)) {
-		NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+	if (list_empty(&fctx->flip)) {
+		NV_ERROR(drm, "unexpected pageflip\n");
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return -EINVAL;
 	}
 
-	s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head);
+	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
 		struct drm_pending_vblank_event *e = s->event;
 		struct timeval now;
@@ -588,6 +719,24 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 }
 
 int
+nouveau_flip_complete(void *data)
+{
+	struct nouveau_channel *chan = data;
+	struct nouveau_drm *drm = chan->drm;
+	struct nouveau_page_flip_state state;
+
+	if (!nouveau_finish_page_flip(chan, &state)) {
+		if (nv_device(drm->device)->card_type < NV_50) {
+			nv_set_crtc_base(drm->dev, state.crtc, state.offset +
+					 state.y * state.pitch +
+					 state.x * state.bpp / 8);
+		}
+	}
+
+	return 0;
+}
+
+int
 nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 			    struct drm_mode_create_dumb *args)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
new file mode 100644
index 000000000000..722548bb3bd3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -0,0 +1,94 @@
+#ifndef __NOUVEAU_DISPLAY_H__
+#define __NOUVEAU_DISPLAY_H__
+
+#include <subdev/vm.h>
+
+#include "nouveau_drm.h"
+
+struct nouveau_framebuffer {
+	struct drm_framebuffer base;
+	struct nouveau_bo *nvbo;
+	struct nouveau_vma vma;
+	u32 r_dma;
+	u32 r_format;
+	u32 r_pitch;
+};
+
+static inline struct nouveau_framebuffer *
+nouveau_framebuffer(struct drm_framebuffer *fb)
+{
+	return container_of(fb, struct nouveau_framebuffer, base);
+}
+
+int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
+			     struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
+
+struct nouveau_page_flip_state {
+	struct list_head head;
+	struct drm_pending_vblank_event *event;
+	int crtc, bpp, pitch, x, y;
+	u64 offset;
+};
+
+struct nouveau_display {
+	void *priv;
+	void (*dtor)(struct drm_device *);
+	int  (*init)(struct drm_device *);
+	void (*fini)(struct drm_device *);
+
+	struct drm_property *dithering_mode;
+	struct drm_property *dithering_depth;
+	struct drm_property *underscan_property;
+	struct drm_property *underscan_hborder_property;
+	struct drm_property *underscan_vborder_property;
+	/* not really hue and saturation: */
+	struct drm_property *vibrant_hue_property;
+	struct drm_property *color_vibrance_property;
+};
+
+static inline struct nouveau_display *
+nouveau_display(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->display;
+}
+
+int  nouveau_display_create(struct drm_device *dev);
+void nouveau_display_destroy(struct drm_device *dev);
+int  nouveau_display_init(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev);
+int  nouveau_display_suspend(struct drm_device *dev);
+void nouveau_display_resume(struct drm_device *dev);
+
+int  nouveau_vblank_enable(struct drm_device *dev, int crtc);
+void nouveau_vblank_disable(struct drm_device *dev, int crtc);
+
+int  nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			    struct drm_pending_vblank_event *event);
+int  nouveau_finish_page_flip(struct nouveau_channel *,
+			      struct nouveau_page_flip_state *);
+
+int  nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+				 struct drm_mode_create_dumb *args);
+int  nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+				     u32 handle, u64 *offset);
+int  nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+				  u32 handle);
+
+void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+extern int nouveau_backlight_init(struct drm_device *);
+extern void nouveau_backlight_exit(struct drm_device *);
+#else
+static inline int
+nouveau_backlight_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void
+nouveau_backlight_exit(struct drm_device *dev) {
+}
+#endif
+
+#endif
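
The header above only declares the shared display state; a hardware backend such as nv04_display_create() fills in the dtor/init/fini hooks that nouveau_display_destroy()/_init()/_fini() call. A minimal sketch of that contract (not part of the patch), using hypothetical example_* names rather than any real backend:

static void example_display_dtor(struct drm_device *dev) { }
static int  example_display_init(struct drm_device *dev) { return 0; }
static void example_display_fini(struct drm_device *dev) { }

static int
example_display_create(struct drm_device *dev)
{
	/* nouveau_display_create() allocates drm->display before calling us */
	struct nouveau_display *disp = nouveau_display(dev);

	disp->dtor = example_display_dtor; /* nouveau_display_destroy() */
	disp->init = example_display_init; /* nouveau_display_init() */
	disp->fini = example_display_fini; /* nouveau_display_fini() */
	return 0;
}
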
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 47d041269f65..40f91e1e5842 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,40 +24,16 @@
  *
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
-
-void
-nouveau_dma_init(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_bo *pushbuf = chan->pushbuf_bo;
-
-	if (dev_priv->card_type >= NV_50) {
-		const int ib_size = pushbuf->bo.mem.size / 2;
-
-		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
-		chan->dma.ib_max = (ib_size / 8) - 1;
-		chan->dma.ib_put = 0;
-		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+#include <core/client.h>
 
-		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
-	} else {
-		chan->dma.max  = (pushbuf->bo.mem.size >> 2) - 2;
-	}
-
-	chan->dma.put  = 0;
-	chan->dma.cur  = chan->dma.put;
-	chan->dma.free = chan->dma.max - chan->dma.cur;
-}
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
 
 void
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
 {
 	bool is_iomem;
-	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
+	u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
 	mem = &mem[chan->dma.cur];
 	if (is_iomem)
 		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
@@ -78,9 +54,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 {
 	uint64_t val;
 
-	val = nvchan_rd32(chan, chan->user_get);
+	val = nv_ro32(chan->object, chan->user_get);
         if (chan->user_get_hi)
-                val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
+                val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
 
 	/* reset counter as long as GET is still advancing, this is
 	 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -92,32 +68,33 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 	}
 
 	if ((++*timeout & 0xff) == 0) {
-		DRM_UDELAY(1);
+		udelay(1);
 		if (*timeout > 100000)
 			return -EBUSY;
 	}
 
-	if (val < chan->pushbuf_base ||
-	    val > chan->pushbuf_base + (chan->dma.max << 2))
+	if (val < chan->push.vma.offset ||
+	    val > chan->push.vma.offset + (chan->dma.max << 2))
 		return -EINVAL;
 
-	return (val - chan->pushbuf_base) >> 2;
+	return (val - chan->push.vma.offset) >> 2;
 }
 
 void
 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	      int delta, int length)
 {
-	struct nouveau_bo *pb = chan->pushbuf_bo;
+	struct nouveau_bo *pb = chan->push.buffer;
 	struct nouveau_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
 	u64 offset;
 
-	vma = nouveau_bo_vma_find(bo, chan->vm);
+	vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
 	BUG_ON(!vma);
 	offset = vma->offset + delta;
 
 	BUG_ON(chan->dma.ib_free < 1);
+
 	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
 	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
 
@@ -127,7 +104,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	/* Flush writes. */
 	nouveau_bo_rd32(pb, 0);
 
-	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
+	nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
 	chan->dma.ib_free--;
 }
 
@@ -137,7 +114,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
 	uint32_t cnt = 0, prev_get = 0;
 
 	while (chan->dma.ib_free < count) {
-		uint32_t get = nvchan_rd32(chan, 0x88);
+		uint32_t get = nv_ro32(chan->object, 0x88);
 		if (get != prev_get) {
 			prev_get = get;
 			cnt = 0;
@@ -248,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
 			 * instruct the GPU to jump back to the start right
 			 * after processing the currently pending commands.
 			 */
-			OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+			OUT_RING(chan, chan->push.vma.offset | 0x20000000);
 
 			/* wait for GET to depart from the skips area.
 			 * prevents writing GET==PUT and causing a race
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8db68be9544f..5c2e22932d1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -27,10 +27,10 @@
 #ifndef __NOUVEAU_DMA_H__
 #define __NOUVEAU_DMA_H__
 
-#ifndef NOUVEAU_DMA_DEBUG
-#define NOUVEAU_DMA_DEBUG 0
-#endif
+#include "nouveau_bo.h"
+#include "nouveau_chan.h"
 
+int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
 void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
 		   int delta, int length);
 
@@ -116,12 +116,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
 static inline void
 OUT_RING(struct nouveau_channel *chan, int data)
 {
-	if (NOUVEAU_DMA_DEBUG) {
-		NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
-			chan->id, chan->dma.cur << 2, data);
-	}
-
-	nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
+	nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
 }
 
 extern void
@@ -159,24 +154,19 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
 
 #define WRITE_PUT(val) do {                                                    \
 	DRM_MEMORYBARRIER();                                                   \
-	nouveau_bo_rd32(chan->pushbuf_bo, 0);                                  \
-	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base);  \
+	nouveau_bo_rd32(chan->push.buffer, 0);                                 \
+	nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset);  \
 } while (0)
 
 static inline void
 FIRE_RING(struct nouveau_channel *chan)
 {
-	if (NOUVEAU_DMA_DEBUG) {
-		NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
-			chan->id, chan->dma.cur << 2);
-	}
-
 	if (chan->dma.cur == chan->dma.put)
 		return;
 	chan->accel_done = true;
 
 	if (chan->dma.ib_max) {
-		nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
+		nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2,
 			      (chan->dma.cur - chan->dma.put) << 2);
 	} else {
 		WRITE_PUT(chan->dma.cur);
@@ -191,4 +181,31 @@ WIND_RING(struct nouveau_channel *chan)
 	chan->dma.cur = chan->dma.put;
 }
 
+/* FIFO methods */
+#define NV01_SUBCHAN_OBJECT                                          0x00000000
+#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH                          0x00000010
+#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW                           0x00000014
+#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE                              0x00000018
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER                               0x0000001c
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL                 0x00000001
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG                    0x00000002
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL                0x00000004
+#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD                         0x00001000
+#define NV84_SUBCHAN_NOTIFY_INTR                                     0x00000020
+#define NV84_SUBCHAN_WRCACHE_FLUSH                                   0x00000024
+#define NV10_SUBCHAN_REF_CNT                                         0x00000050
+#define NVSW_SUBCHAN_PAGE_FLIP                                       0x00000054
+#define NV11_SUBCHAN_DMA_SEMAPHORE                                   0x00000060
+#define NV11_SUBCHAN_SEMAPHORE_OFFSET                                0x00000064
+#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE                               0x00000068
+#define NV11_SUBCHAN_SEMAPHORE_RELEASE                               0x0000006c
+#define NV40_SUBCHAN_YIELD                                           0x00000080
+
+/* NV_SW object class */
+#define NV_SW_DMA_VBLSEM                                             0x0000018c
+#define NV_SW_VBLSEM_OFFSET                                          0x00000400
+#define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
+#define NV_SW_VBLSEM_RELEASE                                         0x00000408
+#define NV_SW_PAGE_FLIP                                              0x00000500
+
 #endif
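
The method defines above pair with the RING_SPACE()/BEGIN_NV04()/OUT_RING()/FIRE_RING() helpers kept in this header. A minimal sketch (not part of the patch) of the usual submission sequence, borrowing NvSubSw and NV_SW_PAGE_FLIP as used by nouveau_display.c earlier in this diff; example_emit_sw_flip() is hypothetical:

static int
example_emit_sw_flip(struct nouveau_channel *chan)
{
	int ret;

	/* reserve two dwords: the NV04-style method header plus one data word */
	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
	OUT_RING  (chan, 0x00000000);

	/* bump PUT (nv50_dma_push() or WRITE_PUT() internally) so the GPU sees it */
	FIRE_RING (chan);
	return 0;
}
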
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 898e5e32293c..978a108ba7a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -23,164 +23,37 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_i2c.h"
+#include "nouveau_drm.h"
 #include "nouveau_connector.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
-#include "nouveau_gpio.h"
 
-/******************************************************************************
- * aux channel util functions
- *****************************************************************************/
-#define AUX_DBG(fmt, args...) do {                                             \
-	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_AUXCH) {                     \
-		NV_PRINTK(KERN_DEBUG, dev, "AUXCH(%d): " fmt, ch, ##args);     \
-	}                                                                      \
-} while (0)
-#define AUX_ERR(fmt, args...) NV_ERROR(dev, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct drm_device *dev, int ch)
-{
-	nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct drm_device *dev, int ch)
-{
-	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
-	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
-	const u32 urep = unksel ? 0x01000000 : 0x02000000;
-	u32 ctrl, timeout;
-
-	/* wait up to 1ms for any previous transaction to be done... */
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("begin idle timeout 0x%08x", ctrl);
-			return -EBUSY;
-		}
-	} while (ctrl & 0x03010000);
-
-	/* set some magic, and wait up to 1ms for it to appear */
-	nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("magic wait 0x%08x\n", ctrl);
-			auxch_fini(dev, ch);
-			return -EBUSY;
-		}
-	} while ((ctrl & 0x03000000) != urep);
-
-	return 0;
-}
-
-static int
-auxch_tx(struct drm_device *dev, int ch, u8 type, u32 addr, u8 *data, u8 size)
-{
-	u32 ctrl, stat, timeout, retries;
-	u32 xbuf[4] = {};
-	int ret, i;
-
-	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
-	ret = auxch_init(dev, ch);
-	if (ret)
-		goto out;
-
-	stat = nv_rd32(dev, 0x00e4e8 + (ch * 0x50));
-	if (!(stat & 0x10000000)) {
-		AUX_DBG("sink not detected\n");
-		ret = -ENXIO;
-		goto out;
-	}
-
-	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
-		for (i = 0; i < 16; i += 4) {
-			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
-			nv_wr32(dev, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
-		}
-	}
-
-	ctrl  = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
-	ctrl &= ~0x0001f0ff;
-	ctrl |= type << 12;
-	ctrl |= size - 1;
-	nv_wr32(dev, 0x00e4e0 + (ch * 0x50), addr);
-
-	/* retry transaction a number of times on failure... */
-	ret = -EREMOTEIO;
-	for (retries = 0; retries < 32; retries++) {
-		/* reset, and delay a while if this is a retry */
-		nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
-		nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
-		if (retries)
-			udelay(400);
-
-		/* transaction request, wait up to 1ms for it to complete */
-		nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
-		timeout = 1000;
-		do {
-			ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
-			udelay(1);
-			if (!timeout--) {
-				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
-				goto out;
-			}
-		} while (ctrl & 0x00010000);
-
-		/* read status, and check if transaction completed ok */
-		stat = nv_mask(dev, 0x00e4e8 + (ch * 0x50), 0, 0);
-		if (!(stat & 0x000f0f00)) {
-			ret = 0;
-			break;
-		}
-
-		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
-	}
-
-	if (type & 1) {
-		for (i = 0; i < 16; i += 4) {
-			xbuf[i / 4] = nv_rd32(dev, 0x00e4d0 + (ch * 0x50) + i);
-			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
-		}
-		memcpy(data, xbuf, size);
-	}
-
-out:
-	auxch_fini(dev, ch);
-	return ret;
-}
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
 
 u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
+nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct bit_entry d;
 	u8 *table;
 	int i;
 
 	if (bit_table(dev, 'd', &d)) {
-		NV_ERROR(dev, "BIT 'd' table not found\n");
+		NV_ERROR(drm, "BIT 'd' table not found\n");
 		return NULL;
 	}
 
 	if (d.version != 1) {
-		NV_ERROR(dev, "BIT 'd' table version %d unknown\n", d.version);
+		NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
 		return NULL;
 	}
 
 	table = ROMPTR(dev, d.data[0]);
 	if (!table) {
-		NV_ERROR(dev, "displayport table pointer invalid\n");
+		NV_ERROR(drm, "displayport table pointer invalid\n");
 		return NULL;
 	}
 
@@ -191,7 +64,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
 	case 0x40:
 		break;
 	default:
-		NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]);
+		NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
 		return NULL;
 	}
 
@@ -201,7 +74,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
 			return table;
 	}
 
-	NV_ERROR(dev, "displayport encoder table not found\n");
+	NV_ERROR(drm, "displayport encoder table not found\n");
 	return NULL;
 }
 
@@ -209,9 +82,9 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
  * link training
  *****************************************************************************/
 struct dp_state {
+	struct nouveau_i2c_port *auxch;
 	struct dp_train_func *func;
-	struct dcb_entry *dcb;
-	int auxch;
+	struct dcb_output *dcb;
 	int crtc;
 	u8 *dpcd;
 	int link_nr;
@@ -223,9 +96,10 @@ struct dp_state {
 static void
 dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 sink[2];
 
-	NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+	NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
 
 	/* set desired link configuration on the source */
 	dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
@@ -237,27 +111,29 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
 	if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
 		sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 
-	auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2);
+	nv_wraux(dp->auxch, DP_LINK_BW_SET, sink, 2);
 }
 
 static void
 dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 sink_tp;
 
-	NV_DEBUG_KMS(dev, "training pattern %d\n", pattern);
+	NV_DEBUG(drm, "training pattern %d\n", pattern);
 
 	dp->func->train_set(dev, dp->dcb, pattern);
 
-	auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
+	nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
 	sink_tp &= ~DP_TRAINING_PATTERN_MASK;
 	sink_tp |= pattern;
-	auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
+	nv_wraux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
 }
 
 static int
 dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i;
 
 	for (i = 0; i < dp->link_nr; i++) {
@@ -271,27 +147,26 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
 		if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
 			dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
-		NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]);
+		NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
 		dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
 	}
 
-	return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4);
+	return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
 }
 
 static int
 dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int ret;
 
 	udelay(delay);
 
-	ret = auxch_tx(dev, dp->auxch, 9, DP_LANE0_1_STATUS, dp->stat, 6);
+	ret = nv_rdaux(dp->auxch, DP_LANE0_1_STATUS, dp->stat, 6);
 	if (ret)
 		return ret;
 
-	NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n",
-		     dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3],
-		     dp->stat[4], dp->stat[5]);
+	NV_DEBUG(drm, "status %*ph\n", 6, dp->stat);
 	return 0;
 }
 
@@ -409,7 +284,7 @@ dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
 	nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
 }
 
-bool
+static bool
 nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 		      struct dp_train_func *func)
 {
@@ -418,19 +293,20 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 	struct nouveau_connector *nv_connector =
 		nouveau_encoder_connector_get(nv_encoder);
 	struct drm_device *dev = encoder->dev;
-	struct nouveau_i2c_chan *auxch;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	const u32 bw_list[] = { 270000, 162000, 0 };
 	const u32 *link_bw = bw_list;
 	struct dp_state dp;
 
-	auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
-	if (!auxch)
+	dp.auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+	if (!dp.auxch)
 		return false;
 
 	dp.func = func;
 	dp.dcb = nv_encoder->dcb;
 	dp.crtc = nv_crtc->index;
-	dp.auxch = auxch->drive;
 	dp.dpcd = nv_encoder->dp.dpcd;
 
 	/* adjust required bandwidth for 8B/10B coding overhead */
@@ -440,7 +316,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 	 * we take during link training (DP_SET_POWER is one), we need
 	 * to ignore them for the moment to avoid races.
 	 */
-	nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
+	gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
 
 	/* enable down-spreading, if possible */
 	dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
@@ -483,7 +359,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 	dp_link_train_fini(dev, &dp);
 
 	/* re-enable hotplug detect */
-	nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
+	gpio->irq(gpio, 0, nv_connector->hpd, 0xff, true);
 	return true;
 }
 
@@ -492,10 +368,12 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
 		struct dp_train_func *func)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_i2c_chan *auxch;
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *auxch;
 	u8 status;
 
-	auxch = nouveau_i2c_find(encoder->dev, nv_encoder->dcb->i2c_index);
+	auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
 	if (!auxch)
 		return;
 
@@ -504,27 +382,28 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
 	else
 		status = DP_SET_POWER_D3;
 
-	nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+	nv_wraux(auxch, DP_SET_POWER, &status, 1);
 
 	if (mode == DRM_MODE_DPMS_ON)
 		nouveau_dp_link_train(encoder, datarate, func);
 }
 
 static void
-nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_chan *auxch,
+nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
 		     u8 *dpcd)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 buf[3];
 
 	if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;
 
-	if (!auxch_tx(dev, auxch->drive, 9, DP_SINK_OUI, buf, 3))
-		NV_DEBUG_KMS(dev, "Sink OUI: %02hx%02hx%02hx\n",
+	if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3))
+		NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
 			     buf[0], buf[1], buf[2]);
 
-	if (!auxch_tx(dev, auxch->drive, 9, DP_BRANCH_OUI, buf, 3))
-		NV_DEBUG_KMS(dev, "Branch OUI: %02hx%02hx%02hx\n",
+	if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3))
+		NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
 			     buf[0], buf[1], buf[2]);
 
 }
@@ -534,24 +413,26 @@ nouveau_dp_detect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
-	struct nouveau_i2c_chan *auxch;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *auxch;
 	u8 *dpcd = nv_encoder->dp.dpcd;
 	int ret;
 
-	auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+	auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
 	if (!auxch)
 		return false;
 
-	ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
+	ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
 	if (ret)
 		return false;
 
 	nv_encoder->dp.link_bw = 27000 * dpcd[1];
 	nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
 
-	NV_DEBUG_KMS(dev, "display: %dx%d dpcd 0x%02x\n",
+	NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
 		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]);
-	NV_DEBUG_KMS(dev, "encoder: %dx%d\n",
+	NV_DEBUG(drm, "encoder: %dx%d\n",
 		     nv_encoder->dcb->dpconf.link_nr,
 		     nv_encoder->dcb->dpconf.link_bw);
 
@@ -560,65 +441,10 @@ nouveau_dp_detect(struct drm_encoder *encoder)
 	if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
 		nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
 
-	NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
+	NV_DEBUG(drm, "maximum: %dx%d\n",
 		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
 
 	nouveau_dp_probe_oui(dev, auxch, dpcd);
 
 	return true;
 }
-
-int
-nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
-		 uint8_t *data, int data_nr)
-{
-	return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
-}
-
-static int
-nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
-	struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap;
-	struct i2c_msg *msg = msgs;
-	int ret, mcnt = num;
-
-	while (mcnt--) {
-		u8 remaining = msg->len;
-		u8 *ptr = msg->buf;
-
-		while (remaining) {
-			u8 cnt = (remaining > 16) ? 16 : remaining;
-			u8 cmd;
-
-			if (msg->flags & I2C_M_RD)
-				cmd = AUX_I2C_READ;
-			else
-				cmd = AUX_I2C_WRITE;
-
-			if (mcnt || remaining > 16)
-				cmd |= AUX_I2C_MOT;
-
-			ret = nouveau_dp_auxch(auxch, cmd, msg->addr, ptr, cnt);
-			if (ret < 0)
-				return ret;
-
-			ptr += cnt;
-			remaining -= cnt;
-		}
-
-		msg++;
-	}
-
-	return num;
-}
-
-static u32
-nouveau_dp_i2c_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nouveau_dp_i2c_algo = {
-	.master_xfer = nouveau_dp_i2c_xfer,
-	.functionality = nouveau_dp_i2c_func
-};
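
With the driver-local auxch_tx() helpers removed above, aux transactions now go through the i2c subdev instead. A minimal sketch (not part of the patch) of the lookup-and-read pattern nouveau_dp_detect() uses, wrapped in a hypothetical example_read_dpcd() helper:

static int
example_read_dpcd(struct drm_device *dev, struct nouveau_encoder *nv_encoder,
		  u8 *dpcd)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
	struct nouveau_i2c_port *auxch;

	auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
	if (!auxch)
		return -ENODEV;

	/* first 8 DPCD bytes, exactly as nouveau_dp_detect() reads them */
	return nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
}
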
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
new file mode 100644
index 000000000000..ccae8c26ae2b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <core/device.h>
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/device.h>
+#include <subdev/vm.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_irq.h"
+#include "nouveau_dma.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
+#include "nouveau_agp.h"
+#include "nouveau_vga.h"
+#include "nouveau_pm.h"
+#include "nouveau_acpi.h"
+#include "nouveau_bios.h"
+#include "nouveau_ioctl.h"
+#include "nouveau_abi16.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_fence.h"
+
+#include "nouveau_ttm.h"
+
+MODULE_PARM_DESC(config, "option string to pass to driver core");
+static char *nouveau_config;
+module_param_named(config, nouveau_config, charp, 0400);
+
+MODULE_PARM_DESC(debug, "debug string to pass to driver core");
+static char *nouveau_debug;
+module_param_named(debug, nouveau_debug, charp, 0400);
+
+MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
+static int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(modeset, "enable driver");
+static int nouveau_modeset = -1;
+module_param_named(modeset, nouveau_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static u64
+nouveau_name(struct pci_dev *pdev)
+{
+	u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
+	name |= pdev->bus->number << 16;
+	name |= PCI_SLOT(pdev->devfn) << 8;
+	return name | PCI_FUNC(pdev->devfn);
+}
+
+static int
+nouveau_cli_create(struct pci_dev *pdev, const char *name,
+		   int size, void **pcli)
+{
+	struct nouveau_cli *cli;
+	int ret;
+
+	ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
+				     nouveau_debug, size, pcli);
+	cli = *pcli;
+	if (ret)
+		return ret;
+
+	mutex_init(&cli->mutex);
+	return 0;
+}
+
+static void
+nouveau_cli_destroy(struct nouveau_cli *cli)
+{
+	struct nouveau_object *client = nv_object(cli);
+	nouveau_vm_ref(NULL, &cli->base.vm, NULL);
+	nouveau_client_fini(&cli->base, false);
+	atomic_set(&client->refcount, 1);
+	nouveau_object_ref(NULL, &client);
+}
+
+static void
+nouveau_accel_fini(struct nouveau_drm *drm)
+{
+	nouveau_gpuobj_ref(NULL, &drm->notify);
+	nouveau_channel_del(&drm->channel);
+	nouveau_channel_del(&drm->cechan);
+	if (drm->fence)
+		nouveau_fence(drm)->dtor(drm);
+}
+
+static void
+nouveau_accel_init(struct nouveau_drm *drm)
+{
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_object *object;
+	u32 arg0, arg1;
+	int ret;
+
+	if (nouveau_noaccel)
+		return;
+
+	/* initialise synchronisation routines */
+	if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
+	else if (device->chipset   <  0x84) ret = nv10_fence_create(drm);
+	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
+	else                                ret = nvc0_fence_create(drm);
+	if (ret) {
+		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
+		nouveau_accel_fini(drm);
+		return;
+	}
+
+	if (device->card_type >= NV_E0) {
+		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
+					  NVDRM_CHAN + 1,
+					  NVE0_CHANNEL_IND_ENGINE_CE0 |
+					  NVE0_CHANNEL_IND_ENGINE_CE1, 0,
+					  &drm->cechan);
+		if (ret)
+			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
+
+		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
+		arg1 = 0;
+	} else {
+		arg0 = NvDmaFB;
+		arg1 = NvDmaTT;
+	}
+
+	ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
+				  arg0, arg1, &drm->channel);
+	if (ret) {
+		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
+		nouveau_accel_fini(drm);
+		return;
+	}
+
+	if (device->card_type < NV_C0) {
+		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
+					&drm->notify);
+		if (ret) {
+			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
+			nouveau_accel_fini(drm);
+			return;
+		}
+
+		ret = nouveau_object_new(nv_object(drm),
+					 drm->channel->handle, NvNotify0,
+					 0x003d, &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = drm->notify->addr,
+						.limit = drm->notify->addr + 31
+						}, sizeof(struct nv_dma_class),
+					 &object);
+		if (ret) {
+			nouveau_accel_fini(drm);
+			return;
+		}
+	}
+
+
+	nouveau_bo_move_init(drm);
+}
+
+static int __devinit
+nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
+{
+	struct nouveau_device *device;
+	struct apertures_struct *aper;
+	bool boot = false;
+	int ret;
+
+	/* remove conflicting drivers (vesafb, efifb etc) */
+	aper = alloc_apertures(3);
+	if (!aper)
+		return -ENOMEM;
+
+	aper->ranges[0].base = pci_resource_start(pdev, 1);
+	aper->ranges[0].size = pci_resource_len(pdev, 1);
+	aper->count = 1;
+
+	if (pci_resource_len(pdev, 2)) {
+		aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+		aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+		aper->count++;
+	}
+
+	if (pci_resource_len(pdev, 3)) {
+		aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+		aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+		aper->count++;
+	}
+
+#ifdef CONFIG_X86
+	boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+
+	ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
+				    nouveau_config, nouveau_debug, &device);
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	ret = drm_get_pci_dev(pdev, pent, &driver);
+	if (ret) {
+		nouveau_object_ref(NULL, (struct nouveau_object **)&device);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+nouveau_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct nouveau_device *device;
+	struct nouveau_drm *drm;
+	int ret;
+
+	ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
+	if (ret)
+		return ret;
+
+	dev->dev_private = drm;
+	drm->dev = dev;
+
+	INIT_LIST_HEAD(&drm->clients);
+	spin_lock_init(&drm->tile.lock);
+
+	/* make sure AGP controller is in a consistent state before we
+	 * (possibly) execute vbios init tables (see nouveau_agp.h)
+	 */
+	if (drm_pci_device_is_agp(dev) && dev->agp) {
+		/* dummy device object, doesn't init anything, but allows
+		 * agp code access to registers
+		 */
+		ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT,
+					 NVDRM_DEVICE, 0x0080,
+					 &(struct nv_device_class) {
+						.device = ~0,
+						.disable =
+						 ~(NV_DEVICE_DISABLE_MMIO |
+						   NV_DEVICE_DISABLE_IDENTIFY),
+						.debug0 = ~0,
+					 }, sizeof(struct nv_device_class),
+					 &drm->device);
+		if (ret)
+			goto fail_device;
+
+		nouveau_agp_reset(drm);
+		nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE);
+	}
+
+	ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE,
+				 0x0080, &(struct nv_device_class) {
+					.device = ~0,
+					.disable = 0,
+					.debug0 = 0,
+				 }, sizeof(struct nv_device_class),
+				 &drm->device);
+	if (ret)
+		goto fail_device;
+
+	/* workaround an odd issue on nvc1 by disabling the device's
+	 * nosnoop capability.  hopefully won't cause issues until a
+	 * better fix is found - assuming there is one...
+	 */
+	device = nv_device(drm->device);
+	if (nv_device(drm->device)->chipset == 0xc1)
+		nv_mask(device, 0x00088080, 0x00000800, 0x00000000);
+
+	nouveau_vga_init(drm);
+	nouveau_agp_init(drm);
+
+	if (device->card_type >= NV_50) {
+		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
+				     0x1000, &drm->client.base.vm);
+		if (ret)
+			goto fail_device;
+	}
+
+	ret = nouveau_ttm_init(drm);
+	if (ret)
+		goto fail_ttm;
+
+	ret = nouveau_bios_init(dev);
+	if (ret)
+		goto fail_bios;
+
+	ret = nouveau_irq_init(dev);
+	if (ret)
+		goto fail_irq;
+
+	ret = nouveau_display_create(dev);
+	if (ret)
+		goto fail_dispctor;
+
+	if (dev->mode_config.num_crtc) {
+		ret = nouveau_display_init(dev);
+		if (ret)
+			goto fail_dispinit;
+	}
+
+	nouveau_pm_init(dev);
+
+	nouveau_accel_init(drm);
+	nouveau_fbcon_init(dev);
+	return 0;
+
+fail_dispinit:
+	nouveau_display_destroy(dev);
+fail_dispctor:
+	nouveau_irq_fini(dev);
+fail_irq:
+	nouveau_bios_takedown(dev);
+fail_bios:
+	nouveau_ttm_fini(drm);
+fail_ttm:
+	nouveau_agp_fini(drm);
+	nouveau_vga_fini(drm);
+fail_device:
+	nouveau_cli_destroy(&drm->client);
+	return ret;
+}
+
+static int
+nouveau_drm_unload(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	nouveau_fbcon_fini(dev);
+	nouveau_accel_fini(drm);
+
+	nouveau_pm_fini(dev);
+
+	nouveau_display_fini(dev);
+	nouveau_display_destroy(dev);
+
+	nouveau_irq_fini(dev);
+	nouveau_bios_takedown(dev);
+
+	nouveau_ttm_fini(drm);
+	nouveau_agp_fini(drm);
+	nouveau_vga_fini(drm);
+
+	nouveau_cli_destroy(&drm->client);
+	return 0;
+}
+
+static void
+nouveau_drm_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_object *device;
+
+	device = drm->client.base.device;
+	drm_put_dev(dev);
+
+	nouveau_object_ref(NULL, &device);
+	nouveau_object_debug();
+}
+
+int
+nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli;
+	int ret;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+	    pm_state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	NV_INFO(drm, "suspending fbcon...\n");
+	nouveau_fbcon_set_suspend(dev, 1);
+
+	NV_INFO(drm, "suspending display...\n");
+	ret = nouveau_display_suspend(dev);
+	if (ret)
+		return ret;
+
+	NV_INFO(drm, "evicting buffers...\n");
+	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+
+	if (drm->fence && nouveau_fence(drm)->suspend) {
+		if (!nouveau_fence(drm)->suspend(drm))
+			return -ENOMEM;
+	}
+
+	NV_INFO(drm, "suspending client object trees...\n");
+	list_for_each_entry(cli, &drm->clients, head) {
+		ret = nouveau_client_fini(&cli->base, true);
+		if (ret)
+			goto fail_client;
+	}
+
+	ret = nouveau_client_fini(&drm->client.base, true);
+	if (ret)
+		goto fail_client;
+
+	nouveau_agp_fini(drm);
+
+	pci_save_state(pdev);
+	if (pm_state.event == PM_EVENT_SUSPEND) {
+		pci_disable_device(pdev);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+
+fail_client:
+	list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
+		nouveau_client_init(&cli->base);
+	}
+
+	NV_INFO(drm, "resuming display...\n");
+	nouveau_display_resume(dev);
+	return ret;
+}
+
+int
+nouveau_drm_resume(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli;
+	int ret;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	NV_INFO(drm, "re-enabling device...\n");
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	nouveau_agp_reset(drm);
+
+	NV_INFO(drm, "resuming client object trees...\n");
+	nouveau_client_init(&drm->client.base);
+	nouveau_agp_init(drm);
+
+	list_for_each_entry(cli, &drm->clients, head) {
+		nouveau_client_init(&cli->base);
+	}
+
+	if (drm->fence && nouveau_fence(drm)->resume)
+		nouveau_fence(drm)->resume(drm);
+
+	nouveau_run_vbios_init(dev);
+	nouveau_irq_postinstall(dev);
+	nouveau_pm_resume(dev);
+
+	NV_INFO(drm, "resuming display...\n");
+	nouveau_display_resume(dev);
+	return 0;
+}
+
+static int
+nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli;
+	char name[16];
+	int ret;
+
+	snprintf(name, sizeof(name), "%d", pid_nr(fpriv->pid));
+
+	ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
+	if (ret)
+		return ret;
+
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
+				     0x1000, &cli->base.vm);
+		if (ret) {
+			nouveau_cli_destroy(cli);
+			return ret;
+		}
+	}
+
+	fpriv->driver_priv = cli;
+
+	mutex_lock(&drm->client.mutex);
+	list_add(&cli->head, &drm->clients);
+	mutex_unlock(&drm->client.mutex);
+	return 0;
+}
+
+static void
+nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct nouveau_cli *cli = nouveau_cli(fpriv);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (cli->abi16)
+		nouveau_abi16_fini(cli->abi16);
+
+	mutex_lock(&drm->client.mutex);
+	list_del(&cli->head);
+	mutex_unlock(&drm->client.mutex);
+}
+
+static void
+nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct nouveau_cli *cli = nouveau_cli(fpriv);
+	nouveau_cli_destroy(cli);
+}
+
+static struct drm_ioctl_desc
+nouveau_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
+static const struct file_operations
+nouveau_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = nouveau_ttm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl = nouveau_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver
+driver = {
+	.driver_features =
+		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+		DRIVER_MODESET | DRIVER_PRIME,
+
+	.load = nouveau_drm_load,
+	.unload = nouveau_drm_unload,
+	.open = nouveau_drm_open,
+	.preclose = nouveau_drm_preclose,
+	.postclose = nouveau_drm_postclose,
+	.lastclose = nouveau_vga_lastclose,
+
+	.irq_preinstall = nouveau_irq_preinstall,
+	.irq_postinstall = nouveau_irq_postinstall,
+	.irq_uninstall = nouveau_irq_uninstall,
+	.irq_handler = nouveau_irq_handler,
+
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = nouveau_vblank_enable,
+	.disable_vblank = nouveau_vblank_disable,
+
+	.ioctls = nouveau_ioctls,
+	.fops = &nouveau_driver_fops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = nouveau_gem_prime_export,
+	.gem_prime_import = nouveau_gem_prime_import,
+
+	.gem_init_object = nouveau_gem_object_new,
+	.gem_free_object = nouveau_gem_object_del,
+	.gem_open_object = nouveau_gem_object_open,
+	.gem_close_object = nouveau_gem_object_close,
+
+	.dumb_create = nouveau_display_dumb_create,
+	.dumb_map_offset = nouveau_display_dumb_map_offset,
+	.dumb_destroy = nouveau_display_dumb_destroy,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+#ifdef GIT_REVISION
+	.date = GIT_REVISION,
+#else
+	.date = DRIVER_DATE,
+#endif
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_device_id
+nouveau_drm_pci_table[] = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask  = 0xff << 16,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask  = 0xff << 16,
+	},
+	{}
+};
+
+static struct pci_driver
+nouveau_drm_pci_driver = {
+	.name = "nouveau",
+	.id_table = nouveau_drm_pci_table,
+	.probe = nouveau_drm_probe,
+	.remove = nouveau_drm_remove,
+	.suspend = nouveau_drm_suspend,
+	.resume = nouveau_drm_resume,
+};
+
+static int __init
+nouveau_drm_init(void)
+{
+	driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
+
+	if (nouveau_modeset == -1) {
+#ifdef CONFIG_VGA_CONSOLE
+		if (vgacon_text_force())
+			nouveau_modeset = 0;
+		else
+#endif
+			nouveau_modeset = 1;
+	}
+
+	if (!nouveau_modeset)
+		return 0;
+
+	nouveau_register_dsm_handler();
+	return drm_pci_init(&driver, &nouveau_drm_pci_driver);
+}
+
+static void __exit
+nouveau_drm_exit(void)
+{
+	if (!nouveau_modeset)
+		return;
+
+	drm_pci_exit(&driver, &nouveau_drm_pci_driver);
+	nouveau_unregister_dsm_handler();
+}
+
+module_init(nouveau_drm_init);
+module_exit(nouveau_drm_exit);
+
+MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
new file mode 100644
index 000000000000..819471217546
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -0,0 +1,144 @@
+#ifndef __NOUVEAU_DRMCLI_H__
+#define __NOUVEAU_DRMCLI_H__
+
+#define DRIVER_AUTHOR		"Nouveau Project"
+#define DRIVER_EMAIL		"nouveau@lists.freedesktop.org"
+
+#define DRIVER_NAME		"nouveau"
+#define DRIVER_DESC		"nVidia Riva/TNT/GeForce/Quadro/Tesla"
+#define DRIVER_DATE		"20120801"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		1
+#define DRIVER_PATCHLEVEL	0
+
+#include <core/client.h>
+
+#include <subdev/vm.h>
+
+#include <drmP.h>
+#include <drm/nouveau_drm.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+struct nouveau_channel;
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+#include "nouveau_fence.h"
+#include "nouveau_bios.h"
+
+struct nouveau_drm_tile {
+	struct nouveau_fence *fence;
+	bool used;
+};
+
+enum nouveau_drm_handle {
+	NVDRM_CLIENT = 0xffffffff,
+	NVDRM_DEVICE = 0xdddddddd,
+	NVDRM_PUSH   = 0xbbbb0000, /* |= client chid */
+	NVDRM_CHAN   = 0xcccc0000, /* |= client chid */
+};
+
+struct nouveau_cli {
+	struct nouveau_client base;
+	struct list_head head;
+	struct mutex mutex;
+	void *abi16;
+};
+
+static inline struct nouveau_cli *
+nouveau_cli(struct drm_file *fpriv)
+{
+	return fpriv ? fpriv->driver_priv : NULL;
+}
+
+struct nouveau_drm {
+	struct nouveau_cli client;
+	struct drm_device *dev;
+
+	struct nouveau_object *device;
+	struct list_head clients;
+
+	struct {
+		enum {
+			UNKNOWN = 0,
+			DISABLE = 1,
+			ENABLED = 2
+		} stat;
+		u32 base;
+		u32 size;
+	} agp;
+
+	/* TTM interface support */
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+		atomic_t validate_sequence;
+		int (*move)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+		int mtrr;
+	} ttm;
+
+	/* GEM interface support */
+	struct {
+		u64 vram_available;
+		u64 gart_available;
+	} gem;
+
+	/* synchronisation */
+	void *fence;
+
+	/* context for accelerated drm-internal operations */
+	struct nouveau_channel *cechan;
+	struct nouveau_channel *channel;
+	struct nouveau_gpuobj *notify;
+	struct nouveau_fbdev *fbcon;
+
+	/* nv10-nv40 tiling regions */
+	struct {
+		struct nouveau_drm_tile reg[15];
+		spinlock_t lock;
+	} tile;
+
+	/* modesetting */
+	struct nvbios vbios;
+	struct nouveau_display *display;
+	struct backlight_device *backlight;
+
+	/* power management */
+	struct nouveau_pm *pm;
+};
+
+static inline struct nouveau_drm *
+nouveau_drm(struct drm_device *dev)
+{
+	return dev->dev_private;
+}
+
+static inline struct nouveau_device *
+nouveau_dev(struct drm_device *dev)
+{
+	return nv_device(nouveau_drm(dev)->device);
+}
+
+int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
+int nouveau_drm_resume(struct pci_dev *);
+
+#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
+#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
+#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
+#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args)
+#define NV_DEBUG(cli, fmt, args...) do {                                       \
+	if (drm_debug & DRM_UT_DRIVER)                                         \
+		nv_info((cli), fmt, ##args);                                   \
+} while (0)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
deleted file mode 100644
index 8b5e558d7c73..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Copyright 2005 Stephane Marchesin.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/console.h>
-#include <linux/module.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include "nouveau_drv.h"
-#include "nouveau_abi16.h"
-#include "nouveau_hw.h"
-#include "nouveau_fb.h"
-#include "nouveau_fbcon.h"
-#include "nouveau_pm.h"
-#include "nouveau_fifo.h"
-#include "nv50_display.h"
-
-#include <drm/drm_pciids.h>
-
-MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
-int nouveau_agpmode = -1;
-module_param_named(agpmode, nouveau_agpmode, int, 0400);
-
-MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
-int nouveau_modeset = -1;
-module_param_named(modeset, nouveau_modeset, int, 0400);
-
-MODULE_PARM_DESC(vbios, "Override default VBIOS location");
-char *nouveau_vbios;
-module_param_named(vbios, nouveau_vbios, charp, 0400);
-
-MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
-int nouveau_vram_pushbuf;
-module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
-
-MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify = 0;
-module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
-
-MODULE_PARM_DESC(vram_type, "Override detected VRAM type");
-char *nouveau_vram_type;
-module_param_named(vram_type, nouveau_vram_type, charp, 0400);
-
-MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
-int nouveau_duallink = 1;
-module_param_named(duallink, nouveau_duallink, int, 0400);
-
-MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
-int nouveau_uscript_lvds = -1;
-module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
-
-MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
-int nouveau_uscript_tmds = -1;
-module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
-
-MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
-int nouveau_ignorelid = 0;
-module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
-
-MODULE_PARM_DESC(noaccel, "Disable all acceleration");
-int nouveau_noaccel = -1;
-module_param_named(noaccel, nouveau_noaccel, int, 0400);
-
-MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
-int nouveau_nofbaccel = 0;
-module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
-
-MODULE_PARM_DESC(force_post, "Force POST");
-int nouveau_force_post = 0;
-module_param_named(force_post, nouveau_force_post, int, 0400);
-
-MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
-int nouveau_override_conntype = 0;
-module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
-
-MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
-int nouveau_tv_disable = 0;
-module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
-
-MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
-		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
-		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
-		 "\t\tDefault: PAL\n"
-		 "\t\t*NOTE* Ignored for cards with external TV encoders.");
-char *nouveau_tv_norm;
-module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
-
-MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
-		"\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
-		"\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
-		"\t\t0x100 vgaattr, 0x200 EVO (G80+)");
-int nouveau_reg_debug;
-module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
-
-MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
-char *nouveau_perflvl;
-module_param_named(perflvl, nouveau_perflvl, charp, 0400);
-
-MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
-int nouveau_perflvl_wr;
-module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
-
-MODULE_PARM_DESC(msi, "Enable MSI (default: off)");
-int nouveau_msi;
-module_param_named(msi, nouveau_msi, int, 0400);
-
-MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)");
-int nouveau_ctxfw;
-module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
-
-MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS");
-int nouveau_mxmdcb = 1;
-module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
-
-int nouveau_fbpercrtc;
-#if 0
-module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
-#endif
-
-static struct pci_device_id pciidlist[] = {
-	{
-		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
-		.class = PCI_BASE_CLASS_DISPLAY << 16,
-		.class_mask  = 0xff << 16,
-	},
-	{
-		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
-		.class = PCI_BASE_CLASS_DISPLAY << 16,
-		.class_mask  = 0xff << 16,
-	},
-	{}
-};
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
-
-static struct drm_driver driver;
-
-static int __devinit
-nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static void
-nouveau_pci_remove(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-
-	drm_put_dev(dev);
-}
-
-int
-nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct nouveau_channel *chan;
-	struct drm_crtc *crtc;
-	int ret, i, e;
-
-	if (pm_state.event == PM_EVENT_PRETHAW)
-		return 0;
-
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
-		return 0;
-
-	NV_INFO(dev, "Disabling display...\n");
-	nouveau_display_fini(dev);
-
-	NV_INFO(dev, "Disabling fbcon...\n");
-	nouveau_fbcon_set_suspend(dev, 1);
-
-	NV_INFO(dev, "Unpinning framebuffer(s)...\n");
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_framebuffer *nouveau_fb;
-
-		nouveau_fb = nouveau_framebuffer(crtc->fb);
-		if (!nouveau_fb || !nouveau_fb->nvbo)
-			continue;
-
-		nouveau_bo_unpin(nouveau_fb->nvbo);
-	}
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-		nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
-	}
-
-	NV_INFO(dev, "Evicting buffers...\n");
-	ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
-
-	NV_INFO(dev, "Idling channels...\n");
-	for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
-		chan = dev_priv->channels.ptr[i];
-
-		if (chan && chan->pushbuf_bo)
-			nouveau_channel_idle(chan);
-	}
-
-	for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
-		if (!dev_priv->eng[e])
-			continue;
-
-		ret = dev_priv->eng[e]->fini(dev, e, true);
-		if (ret) {
-			NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
-			goto out_abort;
-		}
-	}
-
-	ret = pinstmem->suspend(dev);
-	if (ret) {
-		NV_ERROR(dev, "... failed: %d\n", ret);
-		goto out_abort;
-	}
-
-	NV_INFO(dev, "Suspending GPU objects...\n");
-	ret = nouveau_gpuobj_suspend(dev);
-	if (ret) {
-		NV_ERROR(dev, "... failed: %d\n", ret);
-		pinstmem->resume(dev);
-		goto out_abort;
-	}
-
-	NV_INFO(dev, "And we're gone!\n");
-	pci_save_state(pdev);
-	if (pm_state.event == PM_EVENT_SUSPEND) {
-		pci_disable_device(pdev);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return 0;
-
-out_abort:
-	NV_INFO(dev, "Re-enabling acceleration..\n");
-	for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) {
-		if (dev_priv->eng[e])
-			dev_priv->eng[e]->init(dev, e);
-	}
-	return ret;
-}
-
-int
-nouveau_pci_resume(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	struct drm_crtc *crtc;
-	int ret, i;
-
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
-		return 0;
-
-	NV_INFO(dev, "We're back, enabling device...\n");
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	if (pci_enable_device(pdev))
-		return -1;
-	pci_set_master(dev->pdev);
-
-	/* Make sure the AGP controller is in a consistent state */
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
-		nouveau_mem_reset_agp(dev);
-
-	/* Make the CRTCs accessible */
-	engine->display.early_init(dev);
-
-	NV_INFO(dev, "POSTing device...\n");
-	ret = nouveau_run_vbios_init(dev);
-	if (ret)
-		return ret;
-
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
-		ret = nouveau_mem_init_agp(dev);
-		if (ret) {
-			NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
-			return ret;
-		}
-	}
-
-	NV_INFO(dev, "Restoring GPU objects...\n");
-	nouveau_gpuobj_resume(dev);
-
-	NV_INFO(dev, "Reinitialising engines...\n");
-	engine->instmem.resume(dev);
-	engine->mc.init(dev);
-	engine->timer.init(dev);
-	engine->fb.init(dev);
-	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
-		if (dev_priv->eng[i])
-			dev_priv->eng[i]->init(dev, i);
-	}
-
-	nouveau_irq_postinstall(dev);
-
-	/* Re-write SKIPS, they'll have been lost over the suspend */
-	if (nouveau_vram_pushbuf) {
-		struct nouveau_channel *chan;
-		int j;
-
-		for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
-			chan = dev_priv->channels.ptr[i];
-			if (!chan || !chan->pushbuf_bo)
-				continue;
-
-			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
-				nouveau_bo_wr32(chan->pushbuf_bo, i, 0);
-		}
-	}
-
-	nouveau_pm_resume(dev);
-
-	NV_INFO(dev, "Restoring mode...\n");
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_framebuffer *nouveau_fb;
-
-		nouveau_fb = nouveau_framebuffer(crtc->fb);
-		if (!nouveau_fb || !nouveau_fb->nvbo)
-			continue;
-
-		nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
-	}
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-		if (!ret)
-			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
-		if (ret)
-			NV_ERROR(dev, "Could not pin/map cursor.\n");
-	}
-
-	nouveau_fbcon_set_suspend(dev, 0);
-	nouveau_fbcon_zfill_all(dev);
-
-	nouveau_display_init(dev);
-
-	/* Force CLUT to get re-loaded during modeset */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-		nv_crtc->lut.depth = 0;
-	}
-
-	drm_helper_resume_force_mode(dev);
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		u32 offset = nv_crtc->cursor.nvbo->bo.offset;
-
-		nv_crtc->cursor.set_offset(nv_crtc, offset);
-		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
-						 nv_crtc->cursor_saved_y);
-	}
-
-	return 0;
-}
-
-static struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
-};
-
-static const struct file_operations nouveau_driver_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-	.mmap = nouveau_ttm_mmap,
-	.poll = drm_poll,
-	.fasync = drm_fasync,
-	.read = drm_read,
-#if defined(CONFIG_COMPAT)
-	.compat_ioctl = nouveau_compat_ioctl,
-#endif
-	.llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
-	.driver_features =
-		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
-		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-		DRIVER_MODESET | DRIVER_PRIME,
-	.load = nouveau_load,
-	.firstopen = nouveau_firstopen,
-	.lastclose = nouveau_lastclose,
-	.unload = nouveau_unload,
-	.open = nouveau_open,
-	.preclose = nouveau_preclose,
-	.postclose = nouveau_postclose,
-#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
-	.debugfs_init = nouveau_debugfs_init,
-	.debugfs_cleanup = nouveau_debugfs_takedown,
-#endif
-	.irq_preinstall = nouveau_irq_preinstall,
-	.irq_postinstall = nouveau_irq_postinstall,
-	.irq_uninstall = nouveau_irq_uninstall,
-	.irq_handler = nouveau_irq_handler,
-	.get_vblank_counter = drm_vblank_count,
-	.enable_vblank = nouveau_vblank_enable,
-	.disable_vblank = nouveau_vblank_disable,
-	.ioctls = nouveau_ioctls,
-	.fops = &nouveau_driver_fops,
-
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = nouveau_gem_prime_export,
-	.gem_prime_import = nouveau_gem_prime_import,
-
-	.gem_init_object = nouveau_gem_object_new,
-	.gem_free_object = nouveau_gem_object_del,
-	.gem_open_object = nouveau_gem_object_open,
-	.gem_close_object = nouveau_gem_object_close,
-
-	.dumb_create = nouveau_display_dumb_create,
-	.dumb_map_offset = nouveau_display_dumb_map_offset,
-	.dumb_destroy = nouveau_display_dumb_destroy,
-
-	.name = DRIVER_NAME,
-	.desc = DRIVER_DESC,
-#ifdef GIT_REVISION
-	.date = GIT_REVISION,
-#else
-	.date = DRIVER_DATE,
-#endif
-	.major = DRIVER_MAJOR,
-	.minor = DRIVER_MINOR,
-	.patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver nouveau_pci_driver = {
-		.name = DRIVER_NAME,
-		.id_table = pciidlist,
-		.probe = nouveau_pci_probe,
-		.remove = nouveau_pci_remove,
-		.suspend = nouveau_pci_suspend,
-		.resume = nouveau_pci_resume
-};
-
-static int __init nouveau_init(void)
-{
-	driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
-
-	if (nouveau_modeset == -1) {
-#ifdef CONFIG_VGA_CONSOLE
-		if (vgacon_text_force())
-			nouveau_modeset = 0;
-		else
-#endif
-			nouveau_modeset = 1;
-	}
-
-	if (!nouveau_modeset)
-		return 0;
-
-	nouveau_register_dsm_handler();
-	return drm_pci_init(&driver, &nouveau_pci_driver);
-}
-
-static void __exit nouveau_exit(void)
-{
-	if (!nouveau_modeset)
-		return;
-
-	drm_pci_exit(&driver, &nouveau_pci_driver);
-	nouveau_unregister_dsm_handler();
-}
-
-module_init(nouveau_init);
-module_exit(nouveau_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
deleted file mode 100644
index 543c79bd958c..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ /dev/null
@@ -1,1655 +0,0 @@
-/*
- * Copyright 2005 Stephane Marchesin.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NOUVEAU_DRV_H__
-#define __NOUVEAU_DRV_H__
-
-#define DRIVER_AUTHOR		"Stephane Marchesin"
-#define DRIVER_EMAIL		"nouveau@lists.freedesktop.org"
-
-#define DRIVER_NAME		"nouveau"
-#define DRIVER_DESC		"nVidia Riva/TNT/GeForce"
-#define DRIVER_DATE		"20120316"
-
-#define DRIVER_MAJOR		1
-#define DRIVER_MINOR		0
-#define DRIVER_PATCHLEVEL	0
-
-#define NOUVEAU_FAMILY   0x0000FFFF
-#define NOUVEAU_FLAGS    0xFFFF0000
-
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-
-struct nouveau_fpriv {
-	spinlock_t lock;
-	struct list_head channels;
-	struct nouveau_vm *vm;
-};
-
-static inline struct nouveau_fpriv *
-nouveau_fpriv(struct drm_file *file_priv)
-{
-	return file_priv ? file_priv->driver_priv : NULL;
-}
-
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
-#include <drm/nouveau_drm.h>
-#include "nouveau_reg.h"
-#include "nouveau_bios.h"
-#include "nouveau_util.h"
-
-struct nouveau_grctx;
-struct nouveau_mem;
-#include "nouveau_vm.h"
-
-#define MAX_NUM_DCB_ENTRIES 16
-
-#define NOUVEAU_MAX_CHANNEL_NR 4096
-#define NOUVEAU_MAX_TILE_NR 15
-
-struct nouveau_mem {
-	struct drm_device *dev;
-
-	struct nouveau_vma bar_vma;
-	struct nouveau_vma vma[2];
-	u8  page_shift;
-
-	struct drm_mm_node *tag;
-	struct list_head regions;
-	dma_addr_t *pages;
-	u32 memtype;
-	u64 offset;
-	u64 size;
-	struct sg_table *sg;
-};
-
-struct nouveau_tile_reg {
-	bool used;
-	uint32_t addr;
-	uint32_t limit;
-	uint32_t pitch;
-	uint32_t zcomp;
-	struct drm_mm_node *tag_mem;
-	struct nouveau_fence *fence;
-};
-
-struct nouveau_bo {
-	struct ttm_buffer_object bo;
-	struct ttm_placement placement;
-	u32 valid_domains;
-	u32 placements[3];
-	u32 busy_placements[3];
-	struct ttm_bo_kmap_obj kmap;
-	struct list_head head;
-
-	/* protected by ttm_bo_reserve() */
-	struct drm_file *reserved_by;
-	struct list_head entry;
-	int pbbo_index;
-	bool validate_mapped;
-
-	struct list_head vma_list;
-	unsigned page_shift;
-
-	uint32_t tile_mode;
-	uint32_t tile_flags;
-	struct nouveau_tile_reg *tile;
-
-	struct drm_gem_object *gem;
-	int pin_refcnt;
-
-	struct ttm_bo_kmap_obj dma_buf_vmap;
-	int vmapping_count;
-};
-
-#define nouveau_bo_tile_layout(nvbo)				\
-	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
-
-static inline struct nouveau_bo *
-nouveau_bo(struct ttm_buffer_object *bo)
-{
-	return container_of(bo, struct nouveau_bo, bo);
-}
-
-static inline struct nouveau_bo *
-nouveau_gem_object(struct drm_gem_object *gem)
-{
-	return gem ? gem->driver_private : NULL;
-}
-
-/* TODO: submit equivalent to TTM generic API upstream? */
-static inline void __iomem *
-nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
-{
-	bool is_iomem;
-	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
-						&nvbo->kmap, &is_iomem);
-	WARN_ON_ONCE(ioptr && !is_iomem);
-	return ioptr;
-}
-
-enum nouveau_flags {
-	NV_NFORCE   = 0x10000000,
-	NV_NFORCE2  = 0x20000000
-};
-
-#define NVOBJ_ENGINE_SW		0
-#define NVOBJ_ENGINE_GR		1
-#define NVOBJ_ENGINE_CRYPT	2
-#define NVOBJ_ENGINE_COPY0	3
-#define NVOBJ_ENGINE_COPY1	4
-#define NVOBJ_ENGINE_MPEG	5
-#define NVOBJ_ENGINE_PPP	NVOBJ_ENGINE_MPEG
-#define NVOBJ_ENGINE_BSP	6
-#define NVOBJ_ENGINE_VP		7
-#define NVOBJ_ENGINE_FIFO	14
-#define NVOBJ_ENGINE_FENCE	15
-#define NVOBJ_ENGINE_NR		16
-#define NVOBJ_ENGINE_DISPLAY	(NVOBJ_ENGINE_NR + 0) /*XXX*/
-
-#define NVOBJ_FLAG_DONT_MAP             (1 << 0)
-#define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
-#define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
-#define NVOBJ_FLAG_VM			(1 << 3)
-#define NVOBJ_FLAG_VM_USER		(1 << 4)
-
-#define NVOBJ_CINST_GLOBAL	0xdeadbeef
-
-struct nouveau_gpuobj {
-	struct drm_device *dev;
-	struct kref refcount;
-	struct list_head list;
-
-	void *node;
-	u32 *suspend;
-
-	uint32_t flags;
-
-	u32 size;
-	u32 pinst;	/* PRAMIN BAR offset */
-	u32 cinst;	/* Channel offset */
-	u64 vinst;	/* VRAM address */
-	u64 linst;	/* VM address */
-
-	uint32_t engine;
-	uint32_t class;
-
-	void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
-	void *priv;
-};
-
-struct nouveau_page_flip_state {
-	struct list_head head;
-	struct drm_pending_vblank_event *event;
-	int crtc, bpp, pitch, x, y;
-	uint64_t offset;
-};
-
-enum nouveau_channel_mutex_class {
-	NOUVEAU_UCHANNEL_MUTEX,
-	NOUVEAU_KCHANNEL_MUTEX
-};
-
-struct nouveau_channel {
-	struct drm_device *dev;
-	struct list_head list;
-	int id;
-
-	/* references to the channel data structure */
-	struct kref ref;
-	/* users of the hardware channel resources, the hardware
-	 * context will be kicked off when it reaches zero. */
-	atomic_t users;
-	struct mutex mutex;
-
-	/* owner of this fifo */
-	struct drm_file *file_priv;
-	/* mapping of the fifo itself */
-	struct drm_local_map *map;
-
-	/* mapping of the regs controlling the fifo */
-	void __iomem *user;
-	uint32_t user_get;
-	uint32_t user_get_hi;
-	uint32_t user_put;
-
-	/* DMA push buffer */
-	struct nouveau_gpuobj *pushbuf;
-	struct nouveau_bo     *pushbuf_bo;
-	struct nouveau_vma     pushbuf_vma;
-	uint64_t               pushbuf_base;
-
-	/* Notifier memory */
-	struct nouveau_bo *notifier_bo;
-	struct nouveau_vma notifier_vma;
-	struct drm_mm notifier_heap;
-
-	/* PFIFO context */
-	struct nouveau_gpuobj *ramfc;
-
-	/* Execution engine contexts */
-	void *engctx[NVOBJ_ENGINE_NR];
-
-	/* NV50 VM */
-	struct nouveau_vm     *vm;
-	struct nouveau_gpuobj *vm_pd;
-
-	/* Objects */
-	struct nouveau_gpuobj *ramin; /* Private instmem */
-	struct drm_mm          ramin_heap; /* Private PRAMIN heap */
-	struct nouveau_ramht  *ramht; /* Hash table */
-
-	/* GPU object info for stuff used in-kernel (mm_enabled) */
-	uint32_t m2mf_ntfy;
-	uint32_t vram_handle;
-	uint32_t gart_handle;
-	bool accel_done;
-
-	/* Push buffer state (only for drm's channel on !mm_enabled) */
-	struct {
-		int max;
-		int free;
-		int cur;
-		int put;
-		/* access via pushbuf_bo */
-
-		int ib_base;
-		int ib_max;
-		int ib_free;
-		int ib_put;
-	} dma;
-
-	struct {
-		bool active;
-		char name[32];
-		struct drm_info_list info;
-	} debugfs;
-};
-
-struct nouveau_exec_engine {
-	void (*destroy)(struct drm_device *, int engine);
-	int  (*init)(struct drm_device *, int engine);
-	int  (*fini)(struct drm_device *, int engine, bool suspend);
-	int  (*context_new)(struct nouveau_channel *, int engine);
-	void (*context_del)(struct nouveau_channel *, int engine);
-	int  (*object_new)(struct nouveau_channel *, int engine,
-			   u32 handle, u16 class);
-	void (*set_tile_region)(struct drm_device *dev, int i);
-	void (*tlb_flush)(struct drm_device *, int engine);
-};
-
-struct nouveau_instmem_engine {
-	void	*priv;
-
-	int	(*init)(struct drm_device *dev);
-	void	(*takedown)(struct drm_device *dev);
-	int	(*suspend)(struct drm_device *dev);
-	void	(*resume)(struct drm_device *dev);
-
-	int	(*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
-		       u32 size, u32 align);
-	void	(*put)(struct nouveau_gpuobj *);
-	int	(*map)(struct nouveau_gpuobj *);
-	void	(*unmap)(struct nouveau_gpuobj *);
-
-	void	(*flush)(struct drm_device *);
-};
-
-struct nouveau_mc_engine {
-	int  (*init)(struct drm_device *dev);
-	void (*takedown)(struct drm_device *dev);
-};
-
-struct nouveau_timer_engine {
-	int      (*init)(struct drm_device *dev);
-	void     (*takedown)(struct drm_device *dev);
-	uint64_t (*read)(struct drm_device *dev);
-};
-
-struct nouveau_fb_engine {
-	int num_tiles;
-	struct drm_mm tag_heap;
-	void *priv;
-
-	int  (*init)(struct drm_device *dev);
-	void (*takedown)(struct drm_device *dev);
-
-	void (*init_tile_region)(struct drm_device *dev, int i,
-				 uint32_t addr, uint32_t size,
-				 uint32_t pitch, uint32_t flags);
-	void (*set_tile_region)(struct drm_device *dev, int i);
-	void (*free_tile_region)(struct drm_device *dev, int i);
-};
-
-struct nouveau_display_engine {
-	void *priv;
-	int (*early_init)(struct drm_device *);
-	void (*late_takedown)(struct drm_device *);
-	int (*create)(struct drm_device *);
-	void (*destroy)(struct drm_device *);
-	int (*init)(struct drm_device *);
-	void (*fini)(struct drm_device *);
-
-	struct drm_property *dithering_mode;
-	struct drm_property *dithering_depth;
-	struct drm_property *underscan_property;
-	struct drm_property *underscan_hborder_property;
-	struct drm_property *underscan_vborder_property;
-	/* not really hue and saturation: */
-	struct drm_property *vibrant_hue_property;
-	struct drm_property *color_vibrance_property;
-};
-
-struct nouveau_gpio_engine {
-	spinlock_t lock;
-	struct list_head isr;
-	int (*init)(struct drm_device *);
-	void (*fini)(struct drm_device *);
-	int (*drive)(struct drm_device *, int line, int dir, int out);
-	int (*sense)(struct drm_device *, int line);
-	void (*irq_enable)(struct drm_device *, int line, bool);
-};
-
-struct nouveau_pm_voltage_level {
-	u32 voltage; /* microvolts */
-	u8  vid;
-};
-
-struct nouveau_pm_voltage {
-	bool supported;
-	u8 version;
-	u8 vid_mask;
-
-	struct nouveau_pm_voltage_level *level;
-	int nr_level;
-};
-
-/* Exclusive upper limits */
-#define NV_MEM_CL_DDR2_MAX 8
-#define NV_MEM_WR_DDR2_MAX 9
-#define NV_MEM_CL_DDR3_MAX 17
-#define NV_MEM_WR_DDR3_MAX 17
-#define NV_MEM_CL_GDDR3_MAX 16
-#define NV_MEM_WR_GDDR3_MAX 18
-#define NV_MEM_CL_GDDR5_MAX 21
-#define NV_MEM_WR_GDDR5_MAX 20
-
-struct nouveau_pm_memtiming {
-	int id;
-
-	u32 reg[9];
-	u32 mr[4];
-
-	u8 tCWL;
-
-	u8 odt;
-	u8 drive_strength;
-};
-
-struct nouveau_pm_tbl_header {
-	u8 version;
-	u8 header_len;
-	u8 entry_cnt;
-	u8 entry_len;
-};
-
-struct nouveau_pm_tbl_entry {
-	u8 tWR;
-	u8 tWTR;
-	u8 tCL;
-	u8 tRC;
-	u8 empty_4;
-	u8 tRFC;	/* Byte 5 */
-	u8 empty_6;
-	u8 tRAS;	/* Byte 7 */
-	u8 empty_8;
-	u8 tRP;		/* Byte 9 */
-	u8 tRCDRD;
-	u8 tRCDWR;
-	u8 tRRD;
-	u8 tUNK_13;
-	u8 RAM_FT1;		/* 14, a bitmask of random RAM features */
-	u8 empty_15;
-	u8 tUNK_16;
-	u8 empty_17;
-	u8 tUNK_18;
-	u8 tCWL;
-	u8 tUNK_20, tUNK_21;
-};
-
-struct nouveau_pm_profile;
-struct nouveau_pm_profile_func {
-	void (*destroy)(struct nouveau_pm_profile *);
-	void (*init)(struct nouveau_pm_profile *);
-	void (*fini)(struct nouveau_pm_profile *);
-	struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
-};
-
-struct nouveau_pm_profile {
-	const struct nouveau_pm_profile_func *func;
-	struct list_head head;
-	char name[8];
-};
-
-#define NOUVEAU_PM_MAX_LEVEL 8
-struct nouveau_pm_level {
-	struct nouveau_pm_profile profile;
-	struct device_attribute dev_attr;
-	char name[32];
-	int id;
-
-	struct nouveau_pm_memtiming timing;
-	u32 memory;
-	u16 memscript;
-
-	u32 core;
-	u32 shader;
-	u32 rop;
-	u32 copy;
-	u32 daemon;
-	u32 vdec;
-	u32 dom6;
-	u32 unka0;	/* nva3:nvc0 */
-	u32 hub01;	/* nvc0- */
-	u32 hub06;	/* nvc0- */
-	u32 hub07;	/* nvc0- */
-
-	u32 volt_min; /* microvolts */
-	u32 volt_max;
-	u8  fanspeed;
-};
-
-struct nouveau_pm_temp_sensor_constants {
-	u16 offset_constant;
-	s16 offset_mult;
-	s16 offset_div;
-	s16 slope_mult;
-	s16 slope_div;
-};
-
-struct nouveau_pm_threshold_temp {
-	s16 critical;
-	s16 down_clock;
-	s16 fan_boost;
-};
-
-struct nouveau_pm_fan {
-	u32 percent;
-	u32 min_duty;
-	u32 max_duty;
-	u32 pwm_freq;
-	u32 pwm_divisor;
-};
-
-struct nouveau_pm_engine {
-	struct nouveau_pm_voltage voltage;
-	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
-	int nr_perflvl;
-	struct nouveau_pm_temp_sensor_constants sensor_constants;
-	struct nouveau_pm_threshold_temp threshold_temp;
-	struct nouveau_pm_fan fan;
-
-	struct nouveau_pm_profile *profile_ac;
-	struct nouveau_pm_profile *profile_dc;
-	struct nouveau_pm_profile *profile;
-	struct list_head profiles;
-
-	struct nouveau_pm_level boot;
-	struct nouveau_pm_level *cur;
-
-	struct device *hwmon;
-	struct notifier_block acpi_nb;
-
-	int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
-	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
-	int (*clocks_set)(struct drm_device *, void *);
-
-	int (*voltage_get)(struct drm_device *);
-	int (*voltage_set)(struct drm_device *, int voltage);
-	int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
-	int (*pwm_set)(struct drm_device *, int line, u32, u32);
-	int (*temp_get)(struct drm_device *);
-};
-
-struct nouveau_vram_engine {
-	struct nouveau_mm mm;
-
-	int  (*init)(struct drm_device *);
-	void (*takedown)(struct drm_device *dev);
-	int  (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
-		    u32 type, struct nouveau_mem **);
-	void (*put)(struct drm_device *, struct nouveau_mem **);
-
-	bool (*flags_valid)(struct drm_device *, u32 tile_flags);
-};
-
-struct nouveau_engine {
-	struct nouveau_instmem_engine instmem;
-	struct nouveau_mc_engine      mc;
-	struct nouveau_timer_engine   timer;
-	struct nouveau_fb_engine      fb;
-	struct nouveau_display_engine display;
-	struct nouveau_gpio_engine    gpio;
-	struct nouveau_pm_engine      pm;
-	struct nouveau_vram_engine    vram;
-};
-
-struct nouveau_pll_vals {
-	union {
-		struct {
-#ifdef __BIG_ENDIAN
-			uint8_t N1, M1, N2, M2;
-#else
-			uint8_t M1, N1, M2, N2;
-#endif
-		};
-		struct {
-			uint16_t NM1, NM2;
-		} __attribute__((packed));
-	};
-	int log2P;
-
-	int refclk;
-};
-
-enum nv04_fp_display_regs {
-	FP_DISPLAY_END,
-	FP_TOTAL,
-	FP_CRTC,
-	FP_SYNC_START,
-	FP_SYNC_END,
-	FP_VALID_START,
-	FP_VALID_END
-};
-
-struct nv04_crtc_reg {
-	unsigned char MiscOutReg;
-	uint8_t CRTC[0xa0];
-	uint8_t CR58[0x10];
-	uint8_t Sequencer[5];
-	uint8_t Graphics[9];
-	uint8_t Attribute[21];
-	unsigned char DAC[768];
-
-	/* PCRTC regs */
-	uint32_t fb_start;
-	uint32_t crtc_cfg;
-	uint32_t cursor_cfg;
-	uint32_t gpio_ext;
-	uint32_t crtc_830;
-	uint32_t crtc_834;
-	uint32_t crtc_850;
-	uint32_t crtc_eng_ctrl;
-
-	/* PRAMDAC regs */
-	uint32_t nv10_cursync;
-	struct nouveau_pll_vals pllvals;
-	uint32_t ramdac_gen_ctrl;
-	uint32_t ramdac_630;
-	uint32_t ramdac_634;
-	uint32_t tv_setup;
-	uint32_t tv_vtotal;
-	uint32_t tv_vskew;
-	uint32_t tv_vsync_delay;
-	uint32_t tv_htotal;
-	uint32_t tv_hskew;
-	uint32_t tv_hsync_delay;
-	uint32_t tv_hsync_delay2;
-	uint32_t fp_horiz_regs[7];
-	uint32_t fp_vert_regs[7];
-	uint32_t dither;
-	uint32_t fp_control;
-	uint32_t dither_regs[6];
-	uint32_t fp_debug_0;
-	uint32_t fp_debug_1;
-	uint32_t fp_debug_2;
-	uint32_t fp_margin_color;
-	uint32_t ramdac_8c0;
-	uint32_t ramdac_a20;
-	uint32_t ramdac_a24;
-	uint32_t ramdac_a34;
-	uint32_t ctv_regs[38];
-};
-
-struct nv04_output_reg {
-	uint32_t output;
-	int head;
-};
-
-struct nv04_mode_state {
-	struct nv04_crtc_reg crtc_reg[2];
-	uint32_t pllsel;
-	uint32_t sel_clk;
-};
-
-enum nouveau_card_type {
-	NV_04      = 0x04,
-	NV_10      = 0x10,
-	NV_20      = 0x20,
-	NV_30      = 0x30,
-	NV_40      = 0x40,
-	NV_50      = 0x50,
-	NV_C0      = 0xc0,
-	NV_D0      = 0xd0,
-	NV_E0      = 0xe0,
-};
-
-struct drm_nouveau_private {
-	struct drm_device *dev;
-	bool noaccel;
-
-	/* the card type, takes NV_* as values */
-	enum nouveau_card_type card_type;
-	/* exact chipset, derived from NV_PMC_BOOT_0 */
-	int chipset;
-	int flags;
-	u32 crystal;
-
-	void __iomem *mmio;
-
-	spinlock_t ramin_lock;
-	void __iomem *ramin;
-	u32 ramin_size;
-	u32 ramin_base;
-	bool ramin_available;
-	struct drm_mm ramin_heap;
-	struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
-	struct list_head gpuobj_list;
-	struct list_head classes;
-
-	struct nouveau_bo *vga_ram;
-
-	/* interrupt handling */
-	void (*irq_handler[32])(struct drm_device *);
-	bool msi_enabled;
-
-	struct {
-		struct drm_global_reference mem_global_ref;
-		struct ttm_bo_global_ref bo_global_ref;
-		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
-		int (*move)(struct nouveau_channel *,
-			    struct ttm_buffer_object *,
-			    struct ttm_mem_reg *, struct ttm_mem_reg *);
-	} ttm;
-
-	struct {
-		spinlock_t lock;
-		struct drm_mm heap;
-		struct nouveau_bo *bo;
-	} fence;
-
-	struct {
-		spinlock_t lock;
-		struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
-	} channels;
-
-	struct nouveau_engine engine;
-	struct nouveau_channel *channel;
-
-	/* For PFIFO and PGRAPH. */
-	spinlock_t context_switch_lock;
-
-	/* VM/PRAMIN flush, legacy PRAMIN aperture */
-	spinlock_t vm_lock;
-
-	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
-	struct nouveau_ramht  *ramht;
-	struct nouveau_gpuobj *ramfc;
-	struct nouveau_gpuobj *ramro;
-
-	uint32_t ramin_rsvd_vram;
-
-	struct {
-		enum {
-			NOUVEAU_GART_NONE = 0,
-			NOUVEAU_GART_AGP,	/* AGP */
-			NOUVEAU_GART_PDMA,	/* paged dma object */
-			NOUVEAU_GART_HW		/* on-chip gart/vm */
-		} type;
-		uint64_t aper_base;
-		uint64_t aper_size;
-		uint64_t aper_free;
-
-		struct ttm_backend_func *func;
-
-		struct {
-			struct page *page;
-			dma_addr_t   addr;
-		} dummy;
-
-		struct nouveau_gpuobj *sg_ctxdma;
-	} gart_info;
-
-	/* nv10-nv40 tiling regions */
-	struct {
-		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
-		spinlock_t lock;
-	} tile;
-
-	/* VRAM/fb configuration */
-	enum {
-		NV_MEM_TYPE_UNKNOWN = 0,
-		NV_MEM_TYPE_STOLEN,
-		NV_MEM_TYPE_SGRAM,
-		NV_MEM_TYPE_SDRAM,
-		NV_MEM_TYPE_DDR1,
-		NV_MEM_TYPE_DDR2,
-		NV_MEM_TYPE_DDR3,
-		NV_MEM_TYPE_GDDR2,
-		NV_MEM_TYPE_GDDR3,
-		NV_MEM_TYPE_GDDR4,
-		NV_MEM_TYPE_GDDR5
-	} vram_type;
-	uint64_t vram_size;
-	uint64_t vram_sys_base;
-	bool vram_rank_B;
-
-	uint64_t fb_available_size;
-	uint64_t fb_mappable_pages;
-	uint64_t fb_aper_free;
-	int fb_mtrr;
-
-	/* BAR control (NV50-) */
-	struct nouveau_vm *bar1_vm;
-	struct nouveau_vm *bar3_vm;
-
-	/* G8x/G9x virtual address space */
-	struct nouveau_vm *chan_vm;
-
-	struct nvbios vbios;
-	u8 *mxms;
-	struct list_head i2c_ports;
-
-	struct nv04_mode_state mode_reg;
-	struct nv04_mode_state saved_reg;
-	uint32_t saved_vga_font[4][16384];
-	uint32_t crtc_owner;
-	uint32_t dac_users[4];
-
-	struct backlight_device *backlight;
-
-	struct {
-		struct dentry *channel_root;
-	} debugfs;
-
-	struct nouveau_fbdev *nfbdev;
-	struct apertures_struct *apertures;
-};
-
-static inline struct drm_nouveau_private *
-nouveau_private(struct drm_device *dev)
-{
-	return dev->dev_private;
-}
-
-static inline struct drm_nouveau_private *
-nouveau_bdev(struct ttm_bo_device *bd)
-{
-	return container_of(bd, struct drm_nouveau_private, ttm.bdev);
-}
-
-static inline int
-nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
-{
-	struct nouveau_bo *prev;
-
-	if (!pnvbo)
-		return -EINVAL;
-	prev = *pnvbo;
-
-	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
-	if (prev) {
-		struct ttm_buffer_object *bo = &prev->bo;
-
-		ttm_bo_unref(&bo);
-	}
-
-	return 0;
-}
-
-/* nouveau_drv.c */
-extern int nouveau_modeset;
-extern int nouveau_agpmode;
-extern int nouveau_duallink;
-extern int nouveau_uscript_lvds;
-extern int nouveau_uscript_tmds;
-extern int nouveau_vram_pushbuf;
-extern int nouveau_vram_notify;
-extern char *nouveau_vram_type;
-extern int nouveau_fbpercrtc;
-extern int nouveau_tv_disable;
-extern char *nouveau_tv_norm;
-extern int nouveau_reg_debug;
-extern char *nouveau_vbios;
-extern int nouveau_ignorelid;
-extern int nouveau_nofbaccel;
-extern int nouveau_noaccel;
-extern int nouveau_force_post;
-extern int nouveau_override_conntype;
-extern char *nouveau_perflvl;
-extern int nouveau_perflvl_wr;
-extern int nouveau_msi;
-extern int nouveau_ctxfw;
-extern int nouveau_mxmdcb;
-
-extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
-extern int nouveau_pci_resume(struct pci_dev *pdev);
-
-/* nouveau_state.c */
-extern int  nouveau_open(struct drm_device *, struct drm_file *);
-extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
-extern void nouveau_postclose(struct drm_device *, struct drm_file *);
-extern int  nouveau_load(struct drm_device *, unsigned long flags);
-extern int  nouveau_firstopen(struct drm_device *);
-extern void nouveau_lastclose(struct drm_device *);
-extern int  nouveau_unload(struct drm_device *);
-extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
-			    uint32_t reg, uint32_t mask, uint32_t val);
-extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
-			    uint32_t reg, uint32_t mask, uint32_t val);
-extern bool nouveau_wait_cb(struct drm_device *, u64 timeout,
-			    bool (*cond)(void *), void *);
-extern bool nouveau_wait_for_idle(struct drm_device *);
-extern int  nouveau_card_init(struct drm_device *);
-
-/* nouveau_mem.c */
-extern int  nouveau_mem_vram_init(struct drm_device *);
-extern void nouveau_mem_vram_fini(struct drm_device *);
-extern int  nouveau_mem_gart_init(struct drm_device *);
-extern void nouveau_mem_gart_fini(struct drm_device *);
-extern int  nouveau_mem_init_agp(struct drm_device *);
-extern int  nouveau_mem_reset_agp(struct drm_device *);
-extern void nouveau_mem_close(struct drm_device *);
-extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
-extern int  nouveau_mem_timing_calc(struct drm_device *, u32 freq,
-				    struct nouveau_pm_memtiming *);
-extern void nouveau_mem_timing_read(struct drm_device *,
-				    struct nouveau_pm_memtiming *);
-extern int nouveau_mem_vbios_type(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(
-	struct drm_device *dev, uint32_t addr, uint32_t size,
-	uint32_t pitch, uint32_t flags);
-extern void nv10_mem_put_tile_region(struct drm_device *dev,
-				     struct nouveau_tile_reg *tile,
-				     struct nouveau_fence *fence);
-extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
-extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
-
-/* nouveau_notifier.c */
-extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
-extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
-extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
-				   int cout, uint32_t start, uint32_t end,
-				   uint32_t *offset);
-
-/* nouveau_channel.c */
-extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int  nouveau_channel_alloc(struct drm_device *dev,
-				  struct nouveau_channel **chan,
-				  struct drm_file *file_priv,
-				  uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern struct nouveau_channel *
-nouveau_channel_get_unlocked(struct nouveau_channel *);
-extern struct nouveau_channel *
-nouveau_channel_get(struct drm_file *, int id);
-extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
-extern void nouveau_channel_put(struct nouveau_channel **);
-extern void nouveau_channel_ref(struct nouveau_channel *chan,
-				struct nouveau_channel **pchan);
-extern int  nouveau_channel_idle(struct nouveau_channel *chan);
-
-/* nouveau_gpuobj.c */
-#define NVOBJ_ENGINE_ADD(d, e, p) do {                                         \
-	struct drm_nouveau_private *dev_priv = (d)->dev_private;               \
-	dev_priv->eng[NVOBJ_ENGINE_##e] = (p);                                 \
-} while (0)
-
-#define NVOBJ_ENGINE_DEL(d, e) do {                                            \
-	struct drm_nouveau_private *dev_priv = (d)->dev_private;               \
-	dev_priv->eng[NVOBJ_ENGINE_##e] = NULL;                                \
-} while (0)
-
-#define NVOBJ_CLASS(d, c, e) do {                                              \
-	int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e);        \
-	if (ret)                                                               \
-		return ret;                                                    \
-} while (0)
-
-#define NVOBJ_MTHD(d, c, m, e) do {                                            \
-	int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e));                 \
-	if (ret)                                                               \
-		return ret;                                                    \
-} while (0)
-
-extern int  nouveau_gpuobj_early_init(struct drm_device *);
-extern int  nouveau_gpuobj_init(struct drm_device *);
-extern void nouveau_gpuobj_takedown(struct drm_device *);
-extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
-extern void nouveau_gpuobj_resume(struct drm_device *dev);
-extern int  nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
-extern int  nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
-				    int (*exec)(struct nouveau_channel *,
-						u32 class, u32 mthd, u32 data));
-extern int  nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
-extern int  nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
-extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
-				       uint32_t vram_h, uint32_t tt_h);
-extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
-extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
-			      uint32_t size, int align, uint32_t flags,
-			      struct nouveau_gpuobj **);
-extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
-			       struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
-				   u32 size, u32 flags,
-				   struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
-				  uint64_t offset, uint64_t size, int access,
-				  int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
-extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
-			       u64 size, int target, int access, u32 type,
-			       u32 comp, struct nouveau_gpuobj **pobj);
-extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
-				 int class, u64 base, u64 size, int target,
-				 int access, u32 type, u32 comp);
-
-/* nouveau_irq.c */
-extern int         nouveau_irq_init(struct drm_device *);
-extern void        nouveau_irq_fini(struct drm_device *);
-extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
-extern void        nouveau_irq_register(struct drm_device *, int status_bit,
-					void (*)(struct drm_device *));
-extern void        nouveau_irq_unregister(struct drm_device *, int status_bit);
-extern void        nouveau_irq_preinstall(struct drm_device *);
-extern int         nouveau_irq_postinstall(struct drm_device *);
-extern void        nouveau_irq_uninstall(struct drm_device *);
-
-/* nouveau_sgdma.c */
-extern int nouveau_sgdma_init(struct drm_device *);
-extern void nouveau_sgdma_takedown(struct drm_device *);
-extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
-					   uint32_t offset);
-extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
-					       unsigned long size,
-					       uint32_t page_flags,
-					       struct page *dummy_read_page);
-
-/* nouveau_debugfs.c */
-#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
-extern int  nouveau_debugfs_init(struct drm_minor *);
-extern void nouveau_debugfs_takedown(struct drm_minor *);
-extern int  nouveau_debugfs_channel_init(struct nouveau_channel *);
-extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
-#else
-static inline int
-nouveau_debugfs_init(struct drm_minor *minor)
-{
-	return 0;
-}
-
-static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
-{
-}
-
-static inline int
-nouveau_debugfs_channel_init(struct nouveau_channel *chan)
-{
-	return 0;
-}
-
-static inline void
-nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
-{
-}
-#endif
-
-/* nouveau_dma.c */
-extern void nouveau_dma_init(struct nouveau_channel *);
-extern int  nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-
-/* nouveau_acpi.c */
-#define ROM_BIOS_PAGE 4096
-#if defined(CONFIG_ACPI)
-void nouveau_register_dsm_handler(void);
-void nouveau_unregister_dsm_handler(void);
-void nouveau_switcheroo_optimus_dsm(void);
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
-int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
-#else
-static inline void nouveau_register_dsm_handler(void) {}
-static inline void nouveau_unregister_dsm_handler(void) {}
-static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
-static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
-static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
-#endif
-
-/* nouveau_backlight.c */
-#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
-extern int nouveau_backlight_init(struct drm_device *);
-extern void nouveau_backlight_exit(struct drm_device *);
-#else
-static inline int nouveau_backlight_init(struct drm_device *dev)
-{
-	return 0;
-}
-
-static inline void nouveau_backlight_exit(struct drm_device *dev) { }
-#endif
-
-/* nouveau_bios.c */
-extern int nouveau_bios_init(struct drm_device *);
-extern void nouveau_bios_takedown(struct drm_device *dev);
-extern int nouveau_run_vbios_init(struct drm_device *);
-extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
-					struct dcb_entry *, int crtc);
-extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
-extern struct dcb_connector_table_entry *
-nouveau_bios_connector_entry(struct drm_device *, int index);
-extern u32 get_pll_register(struct drm_device *, enum pll_types);
-extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
-			  struct pll_lims *);
-extern int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
-					  struct dcb_entry *, int crtc);
-extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
-extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
-extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
-					 bool *dl, bool *if_is_24bit);
-extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
-			  int head, int pxclk);
-extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
-			    enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_entry *, u32 hash);
-
-/* nouveau_mxm.c */
-int  nouveau_mxm_init(struct drm_device *dev);
-void nouveau_mxm_fini(struct drm_device *dev);
-
-/* nouveau_ttm.c */
-int nouveau_ttm_global_init(struct drm_nouveau_private *);
-void nouveau_ttm_global_release(struct drm_nouveau_private *);
-int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
-
-/* nouveau_hdmi.c */
-void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
-
-/* nv04_fb.c */
-extern int  nv04_fb_vram_init(struct drm_device *);
-extern int  nv04_fb_init(struct drm_device *);
-extern void nv04_fb_takedown(struct drm_device *);
-
-/* nv10_fb.c */
-extern int  nv10_fb_vram_init(struct drm_device *dev);
-extern int  nv1a_fb_vram_init(struct drm_device *dev);
-extern int  nv10_fb_init(struct drm_device *);
-extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
-				     uint32_t addr, uint32_t size,
-				     uint32_t pitch, uint32_t flags);
-extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
-extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
-
-/* nv20_fb.c */
-extern int  nv20_fb_vram_init(struct drm_device *dev);
-extern int  nv20_fb_init(struct drm_device *);
-extern void nv20_fb_takedown(struct drm_device *);
-extern void nv20_fb_init_tile_region(struct drm_device *dev, int i,
-				     uint32_t addr, uint32_t size,
-				     uint32_t pitch, uint32_t flags);
-extern void nv20_fb_set_tile_region(struct drm_device *dev, int i);
-extern void nv20_fb_free_tile_region(struct drm_device *dev, int i);
-
-/* nv30_fb.c */
-extern int  nv30_fb_init(struct drm_device *);
-extern void nv30_fb_takedown(struct drm_device *);
-extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
-				     uint32_t addr, uint32_t size,
-				     uint32_t pitch, uint32_t flags);
-extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
-
-/* nv40_fb.c */
-extern int  nv40_fb_vram_init(struct drm_device *dev);
-extern int  nv40_fb_init(struct drm_device *);
-extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
-
-/* nv50_fb.c */
-extern int  nv50_fb_init(struct drm_device *);
-extern void nv50_fb_takedown(struct drm_device *);
-extern void nv50_fb_vm_trap(struct drm_device *, int display);
-
-/* nvc0_fb.c */
-extern int  nvc0_fb_init(struct drm_device *);
-extern void nvc0_fb_takedown(struct drm_device *);
-
-/* nv04_graph.c */
-extern int  nv04_graph_create(struct drm_device *);
-extern int  nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
-extern int  nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
-				      u32 class, u32 mthd, u32 data);
-extern struct nouveau_bitfield nv04_graph_nsource[];
-
-/* nv10_graph.c */
-extern int  nv10_graph_create(struct drm_device *);
-extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
-extern struct nouveau_bitfield nv10_graph_intr[];
-extern struct nouveau_bitfield nv10_graph_nstatus[];
-
-/* nv20_graph.c */
-extern int  nv20_graph_create(struct drm_device *);
-
-/* nv40_graph.c */
-extern int  nv40_graph_create(struct drm_device *);
-extern void nv40_grctx_init(struct drm_device *, u32 *size);
-extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
-
-/* nv50_graph.c */
-extern int  nv50_graph_create(struct drm_device *);
-extern struct nouveau_enum nv50_data_error_names[];
-extern int  nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
-extern int  nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
-extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
-
-/* nvc0_graph.c */
-extern int  nvc0_graph_create(struct drm_device *);
-extern int  nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
-
-/* nve0_graph.c */
-extern int  nve0_graph_create(struct drm_device *);
-
-/* nv84_crypt.c */
-extern int  nv84_crypt_create(struct drm_device *);
-
-/* nv98_crypt.c */
-extern int  nv98_crypt_create(struct drm_device *dev);
-
-/* nva3_copy.c */
-extern int  nva3_copy_create(struct drm_device *dev);
-
-/* nvc0_copy.c */
-extern int  nvc0_copy_create(struct drm_device *dev, int engine);
-
-/* nv31_mpeg.c */
-extern int  nv31_mpeg_create(struct drm_device *dev);
-
-/* nv50_mpeg.c */
-extern int  nv50_mpeg_create(struct drm_device *dev);
-
-/* nv84_bsp.c */
-/* nv98_bsp.c */
-extern int  nv84_bsp_create(struct drm_device *dev);
-
-/* nv84_vp.c */
-/* nv98_vp.c */
-extern int  nv84_vp_create(struct drm_device *dev);
-
-/* nv98_ppp.c */
-extern int  nv98_ppp_create(struct drm_device *dev);
-
-/* nv04_instmem.c */
-extern int  nv04_instmem_init(struct drm_device *);
-extern void nv04_instmem_takedown(struct drm_device *);
-extern int  nv04_instmem_suspend(struct drm_device *);
-extern void nv04_instmem_resume(struct drm_device *);
-extern int  nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
-			     u32 size, u32 align);
-extern void nv04_instmem_put(struct nouveau_gpuobj *);
-extern int  nv04_instmem_map(struct nouveau_gpuobj *);
-extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
-extern void nv04_instmem_flush(struct drm_device *);
-
-/* nv50_instmem.c */
-extern int  nv50_instmem_init(struct drm_device *);
-extern void nv50_instmem_takedown(struct drm_device *);
-extern int  nv50_instmem_suspend(struct drm_device *);
-extern void nv50_instmem_resume(struct drm_device *);
-extern int  nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
-			     u32 size, u32 align);
-extern void nv50_instmem_put(struct nouveau_gpuobj *);
-extern int  nv50_instmem_map(struct nouveau_gpuobj *);
-extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
-extern void nv50_instmem_flush(struct drm_device *);
-extern void nv84_instmem_flush(struct drm_device *);
-
-/* nvc0_instmem.c */
-extern int  nvc0_instmem_init(struct drm_device *);
-extern void nvc0_instmem_takedown(struct drm_device *);
-extern int  nvc0_instmem_suspend(struct drm_device *);
-extern void nvc0_instmem_resume(struct drm_device *);
-
-/* nv04_mc.c */
-extern int  nv04_mc_init(struct drm_device *);
-extern void nv04_mc_takedown(struct drm_device *);
-
-/* nv40_mc.c */
-extern int  nv40_mc_init(struct drm_device *);
-extern void nv40_mc_takedown(struct drm_device *);
-
-/* nv50_mc.c */
-extern int  nv50_mc_init(struct drm_device *);
-extern void nv50_mc_takedown(struct drm_device *);
-
-/* nv04_timer.c */
-extern int  nv04_timer_init(struct drm_device *);
-extern uint64_t nv04_timer_read(struct drm_device *);
-extern void nv04_timer_takedown(struct drm_device *);
-
-extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
-				 unsigned long arg);
-
-/* nv04_dac.c */
-extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *);
-extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
-extern int nv04_dac_output_offset(struct drm_encoder *encoder);
-extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
-extern bool nv04_dac_in_use(struct drm_encoder *encoder);
-
-/* nv04_dfp.c */
-extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *);
-extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
-extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
-			       int head, bool dl);
-extern void nv04_dfp_disable(struct drm_device *dev, int head);
-extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
-
-/* nv04_tv.c */
-extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
-extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *);
-
-/* nv17_tv.c */
-extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
-
-/* nv04_display.c */
-extern int nv04_display_early_init(struct drm_device *);
-extern void nv04_display_late_takedown(struct drm_device *);
-extern int nv04_display_create(struct drm_device *);
-extern void nv04_display_destroy(struct drm_device *);
-extern int nv04_display_init(struct drm_device *);
-extern void nv04_display_fini(struct drm_device *);
-
-/* nvd0_display.c */
-extern int nvd0_display_create(struct drm_device *);
-extern void nvd0_display_destroy(struct drm_device *);
-extern int nvd0_display_init(struct drm_device *);
-extern void nvd0_display_fini(struct drm_device *);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
-void nvd0_display_flip_stop(struct drm_crtc *);
-int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
-			   struct nouveau_channel *, u32 swap_interval);
-
-/* nv04_crtc.c */
-extern int nv04_crtc_create(struct drm_device *, int index);
-
-/* nouveau_bo.c */
-extern struct ttm_bo_driver nouveau_bo_driver;
-extern void nouveau_bo_move_init(struct nouveau_channel *);
-extern int nouveau_bo_new(struct drm_device *, int size, int align,
-			  uint32_t flags, uint32_t tile_mode,
-			  uint32_t tile_flags,
-			  struct sg_table *sg,
-			  struct nouveau_bo **);
-extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
-extern int nouveau_bo_unpin(struct nouveau_bo *);
-extern int nouveau_bo_map(struct nouveau_bo *);
-extern void nouveau_bo_unmap(struct nouveau_bo *);
-extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type,
-				     uint32_t busy);
-extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
-extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
-extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
-extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
-extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
-extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
-			       bool no_wait_reserve, bool no_wait_gpu);
-
-extern struct nouveau_vma *
-nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
-extern int  nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
-			       struct nouveau_vma *);
-extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
-
-/* nouveau_gem.c */
-extern int nouveau_gem_new(struct drm_device *, int size, int align,
-			   uint32_t domain, uint32_t tile_mode,
-			   uint32_t tile_flags, struct nouveau_bo **);
-extern int nouveau_gem_object_new(struct drm_gem_object *);
-extern void nouveau_gem_object_del(struct drm_gem_object *);
-extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
-extern void nouveau_gem_object_close(struct drm_gem_object *,
-				     struct drm_file *);
-extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
-				 struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
-				     struct drm_file *);
-extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
-				      struct drm_file *);
-extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
-				      struct drm_file *);
-extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
-				  struct drm_file *);
-
-extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
-				struct drm_gem_object *obj, int flags);
-extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf);
-
-/* nouveau_display.c */
-int nouveau_display_create(struct drm_device *dev);
-void nouveau_display_destroy(struct drm_device *dev);
-int nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev);
-int nouveau_vblank_enable(struct drm_device *dev, int crtc);
-void nouveau_vblank_disable(struct drm_device *dev, int crtc);
-int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-			   struct drm_pending_vblank_event *event);
-int nouveau_finish_page_flip(struct nouveau_channel *,
-			     struct nouveau_page_flip_state *);
-int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
-				struct drm_mode_create_dumb *args);
-int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
-				    uint32_t handle, uint64_t *offset);
-int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
-				 uint32_t handle);
-
-/* nv10_gpio.c */
-int nv10_gpio_init(struct drm_device *dev);
-void nv10_gpio_fini(struct drm_device *dev);
-int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
-int nv10_gpio_sense(struct drm_device *dev, int line);
-void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
-
-/* nv50_gpio.c */
-int nv50_gpio_init(struct drm_device *dev);
-void nv50_gpio_fini(struct drm_device *dev);
-int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
-int nv50_gpio_sense(struct drm_device *dev, int line);
-void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
-int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
-int nvd0_gpio_sense(struct drm_device *dev, int line);
-
-/* nv50_calc.c */
-int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
-		  int *N1, int *M1, int *N2, int *M2, int *P);
-int nva3_calc_pll(struct drm_device *, struct pll_lims *,
-		  int clk, int *N, int *fN, int *M, int *P);
-
-#ifndef ioread32_native
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native  ioread32be
-#define iowrite32_native iowrite32be
-#else /* def __BIG_ENDIAN */
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native  ioread32
-#define iowrite32_native iowrite32
-#endif /* def __BIG_ENDIAN else */
-#endif /* !ioread32_native */
-
-/* channel control reg access */
-static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
-{
-	return ioread32_native(chan->user + reg);
-}
-
-static inline void nvchan_wr32(struct nouveau_channel *chan,
-							unsigned reg, u32 val)
-{
-	iowrite32_native(val, chan->user + reg);
-}
-
-/* register access */
-static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	return ioread32_native(dev_priv->mmio + reg);
-}
-
-static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	iowrite32_native(val, dev_priv->mmio + reg);
-}
-
-static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
-{
-	u32 tmp = nv_rd32(dev, reg);
-	nv_wr32(dev, reg, (tmp & ~mask) | val);
-	return tmp;
-}
-
-static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	return ioread8(dev_priv->mmio + reg);
-}
-
-static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	iowrite8(val, dev_priv->mmio + reg);
-}
-
-#define nv_wait(dev, reg, mask, val) \
-	nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
-#define nv_wait_ne(dev, reg, mask, val) \
-	nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
-#define nv_wait_cb(dev, func, data) \
-	nouveau_wait_cb(dev, 2000000000ULL, (func), (data))
-
-/* PRAMIN access */
-static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	return ioread32_native(dev_priv->ramin + offset);
-}
-
-static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	iowrite32_native(val, dev_priv->ramin + offset);
-}
-
-/* object access */
-extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
-extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
-
-/*
- * Logging
- * Argument d is (struct drm_device *).
- */
-#define NV_PRINTK(level, d, fmt, arg...) \
-	printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
-					pci_name(d->pdev), ##arg)
-#ifndef NV_DEBUG_NOTRACE
-#define NV_DEBUG(d, fmt, arg...) do {                                          \
-	if (drm_debug & DRM_UT_DRIVER) {                                       \
-		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__,             \
-			  __LINE__, ##arg);                                    \
-	}                                                                      \
-} while (0)
-#define NV_DEBUG_KMS(d, fmt, arg...) do {                                      \
-	if (drm_debug & DRM_UT_KMS) {                                          \
-		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__,             \
-			  __LINE__, ##arg);                                    \
-	}                                                                      \
-} while (0)
-#else
-#define NV_DEBUG(d, fmt, arg...) do {                                          \
-	if (drm_debug & DRM_UT_DRIVER)                                         \
-		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg);                          \
-} while (0)
-#define NV_DEBUG_KMS(d, fmt, arg...) do {                                      \
-	if (drm_debug & DRM_UT_KMS)                                            \
-		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg);                          \
-} while (0)
-#endif
-#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
-#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
-#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
-#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
-#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
-#define NV_WARNONCE(d, fmt, arg...) do {                                       \
-	static int _warned = 0;                                                \
-	if (!_warned) {                                                        \
-		NV_WARN(d, fmt, ##arg);                                        \
-		_warned = 1;                                                   \
-	}                                                                      \
-} while(0)
-
-/* nouveau_reg_debug bitmask */
-enum {
-	NOUVEAU_REG_DEBUG_MC             = 0x1,
-	NOUVEAU_REG_DEBUG_VIDEO          = 0x2,
-	NOUVEAU_REG_DEBUG_FB             = 0x4,
-	NOUVEAU_REG_DEBUG_EXTDEV         = 0x8,
-	NOUVEAU_REG_DEBUG_CRTC           = 0x10,
-	NOUVEAU_REG_DEBUG_RAMDAC         = 0x20,
-	NOUVEAU_REG_DEBUG_VGACRTC        = 0x40,
-	NOUVEAU_REG_DEBUG_RMVIO          = 0x80,
-	NOUVEAU_REG_DEBUG_VGAATTR        = 0x100,
-	NOUVEAU_REG_DEBUG_EVO            = 0x200,
-	NOUVEAU_REG_DEBUG_AUXCH          = 0x400
-};
-
-#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
-	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
-		NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
-} while (0)
-
-static inline bool
-nv_two_heads(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	const int impl = dev->pci_device & 0x0ff0;
-
-	if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
-	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
-		return true;
-
-	return false;
-}
-
-static inline bool
-nv_gf4_disp_arch(struct drm_device *dev)
-{
-	return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
-}
-
-static inline bool
-nv_two_reg_pll(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	const int impl = dev->pci_device & 0x0ff0;
-
-	if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
-		return true;
-	return false;
-}
-
-static inline bool
-nv_match_device(struct drm_device *dev, unsigned device,
-		unsigned sub_vendor, unsigned sub_device)
-{
-	return dev->pdev->device == device &&
-		dev->pdev->subsystem_vendor == sub_vendor &&
-		dev->pdev->subsystem_device == sub_device;
-}
-
-static inline void *
-nv_engine(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	return (void *)dev_priv->eng[engine];
-}
-
-/* returns 1 if device is one of the nv4x using the 0x4497 object class,
- * helpful to determine a number of other hardware features
- */
-static inline int
-nv44_graph_class(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if ((dev_priv->chipset & 0xf0) == 0x60)
-		return 1;
-
-	return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
-}
-
-/* memory type/access flags, do not match hardware values */
-#define NV_MEM_ACCESS_RO  1
-#define NV_MEM_ACCESS_WO  2
-#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
-#define NV_MEM_ACCESS_SYS 4
-#define NV_MEM_ACCESS_VM  8
-#define NV_MEM_ACCESS_NOSNOOP 16
-
-#define NV_MEM_TARGET_VRAM        0
-#define NV_MEM_TARGET_PCI         1
-#define NV_MEM_TARGET_PCI_NOSNOOP 2
-#define NV_MEM_TARGET_VM          3
-#define NV_MEM_TARGET_GART        4
-
-#define NV_MEM_TYPE_VM 0x7f
-#define NV_MEM_COMP_VM 0x03
-
-/* FIFO methods */
-#define NV01_SUBCHAN_OBJECT                                          0x00000000
-#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH                          0x00000010
-#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW                           0x00000014
-#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE                              0x00000018
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER                               0x0000001c
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL                 0x00000001
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG                    0x00000002
-#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL                0x00000004
-#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD                         0x00001000
-#define NV84_SUBCHAN_NOTIFY_INTR                                     0x00000020
-#define NV84_SUBCHAN_WRCACHE_FLUSH                                   0x00000024
-#define NV10_SUBCHAN_REF_CNT                                         0x00000050
-#define NVSW_SUBCHAN_PAGE_FLIP                                       0x00000054
-#define NV11_SUBCHAN_DMA_SEMAPHORE                                   0x00000060
-#define NV11_SUBCHAN_SEMAPHORE_OFFSET                                0x00000064
-#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE                               0x00000068
-#define NV11_SUBCHAN_SEMAPHORE_RELEASE                               0x0000006c
-#define NV40_SUBCHAN_YIELD                                           0x00000080
-
-/* NV_SW object class */
-#define NV_SW                                                        0x0000506e
-#define NV_SW_DMA_VBLSEM                                             0x0000018c
-#define NV_SW_VBLSEM_OFFSET                                          0x00000400
-#define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
-#define NV_SW_VBLSEM_RELEASE                                         0x00000408
-#define NV_SW_PAGE_FLIP                                              0x00000500
-
-#endif /* __NOUVEAU_DRV_H__ */
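
The declarations stripped out of nouveau_drv.h above include the driver's raw MMIO helpers (nv_rd32/nv_wr32/nv_mask) and the nv_wait polling macros. As a reminder of the pattern this rework displaces, here is a minimal sketch of how those helpers were combined; the register offsets and bit masks are invented purely for illustration, and nv_wait is assumed to evaluate true when the masked value matches within the two-second timeout wired into the macro.

	/* Sketch only: 0x00b000/0x00b004 and the bits below are placeholders. */
	static int example_engine_reset(struct drm_device *dev)
	{
		/* Read-modify-write a hypothetical control register;
		 * nv_mask() returns the previous register value. */
		u32 saved = nv_mask(dev, 0x00b000, 0x00000001, 0x00000001);

		/* Poll a hypothetical status register until its busy bit
		 * clears, bounded by the 2s timeout baked into nv_wait(). */
		if (!nv_wait(dev, 0x00b004, 0x80000000, 0x00000000)) {
			nv_wr32(dev, 0x00b000, saved);
			return -EBUSY;
		}

		return 0;
	}
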
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index db07b978946e..6a17bf2ba9a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -27,23 +27,27 @@
 #ifndef __NOUVEAU_ENCODER_H__
 #define __NOUVEAU_ENCODER_H__
 
+#include <subdev/bios/dcb.h>
+
 #include <drm/drm_encoder_slave.h>
-#include "nouveau_drv.h"
+#include "nv04_display.h"
 
 #define NV_DPMS_CLEARED 0x80
 
+struct nouveau_i2c_port;
+
 struct dp_train_func {
-	void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc,
+	void (*link_set)(struct drm_device *, struct dcb_output *, int crtc,
 			 int nr, u32 bw, bool enhframe);
-	void (*train_set)(struct drm_device *, struct dcb_entry *, u8 pattern);
-	void (*train_adj)(struct drm_device *, struct dcb_entry *,
+	void (*train_set)(struct drm_device *, struct dcb_output *, u8 pattern);
+	void (*train_adj)(struct drm_device *, struct dcb_output *,
 			  u8 lane, u8 swing, u8 preem);
 };
 
 struct nouveau_encoder {
 	struct drm_encoder_slave base;
 
-	struct dcb_entry *dcb;
+	struct dcb_output *dcb;
 	int or;
 
 	/* different to drm_encoder.crtc, this reflects what's
@@ -87,18 +91,16 @@ get_slave_funcs(struct drm_encoder *enc)
 }
 
 /* nouveau_dp.c */
-int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
-		     uint8_t *data, int data_nr);
 bool nouveau_dp_detect(struct drm_encoder *);
 void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
 		     struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
+u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
 
 struct nouveau_connector *
 nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
+int nv50_sor_create(struct drm_connector *, struct dcb_output *);
 void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
+int nv50_dac_create(struct drm_connector *, struct dcb_output *);
 
 
 #endif /* __NOUVEAU_ENCODER_H__ */
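
The dp_train_func callbacks above switch from struct dcb_entry to the new struct dcb_output but keep their shape, so callers still drive DisplayPort link training through the same three hooks. A minimal sketch of that call sequence follows; the lane count, bandwidth value and pattern/drive numbers are placeholders rather than values taken from this patch.

	static void example_dp_train(struct drm_device *dev, struct dcb_output *dcb,
				     struct dp_train_func *func, int crtc)
	{
		int nr = 4;		/* assumed 4-lane link */
		u32 bw = 270000;	/* placeholder bandwidth value */
		u8 lane;

		/* Configure the link, request training pattern 1, then
		 * program per-lane drive settings through the backend. */
		func->link_set(dev, dcb, crtc, nr, bw, true);
		func->train_set(dev, dcb, 1);
		for (lane = 0; lane < nr; lane++)
			func->train_adj(dev, dcb, lane, 0, 0);
	}
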
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
deleted file mode 100644
index f3fb649fe454..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NOUVEAU_FB_H__
-#define __NOUVEAU_FB_H__
-
-struct nouveau_framebuffer {
-	struct drm_framebuffer base;
-	struct nouveau_bo *nvbo;
-	struct nouveau_vma vma;
-	u32 r_dma;
-	u32 r_format;
-	u32 r_pitch;
-};
-
-static inline struct nouveau_framebuffer *
-nouveau_framebuffer(struct drm_framebuffer *fb)
-{
-	return container_of(fb, struct nouveau_framebuffer, base);
-}
-
-int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
-			     struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
-#endif /* __NOUVEAU_FB_H__ */
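
nouveau_fb.h is deleted outright, but the nouveau_framebuffer wrapper and its container_of() downcast live on elsewhere in the driver (judging by the include swap in nouveau_fbcon.h further down, presumably via nouveau_display.h). The pattern is worth spelling out: because struct drm_framebuffer is embedded as the first member, the helper simply rewinds a core pointer to the driver-private wrapper. A small illustrative use, with the function name being a placeholder:

	static struct nouveau_bo *
	example_fb_to_bo(struct drm_framebuffer *fb)
	{
		/* container_of() recovers the enclosing nouveau_framebuffer
		 * from its embedded drm_framebuffer "base" member. */
		struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);

		return nv_fb->nvbo;
	}
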
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 7e41a4006087..67a1a069de28 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -42,19 +42,30 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nouveau_crtc.h"
-#include "nouveau_fb.h"
+
+#include "nouveau_drm.h"
+#include "nouveau_gem.h"
+#include "nouveau_bo.h"
 #include "nouveau_fbcon.h"
-#include "nouveau_dma.h"
+#include "nouveau_chan.h"
+
+#include "nouveau_crtc.h"
+
+#include <core/client.h>
+#include <core/device.h>
+
+#include <subdev/fb.h>
+
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+static int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 
 static void
 nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -62,15 +73,15 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 
 	ret = -ENODEV;
 	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	    mutex_trylock(&dev_priv->channel->mutex)) {
-		if (dev_priv->card_type < NV_50)
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
 			ret = nv04_fbcon_fillrect(info, rect);
 		else
-		if (dev_priv->card_type < NV_C0)
+		if (device->card_type < NV_C0)
 			ret = nv50_fbcon_fillrect(info, rect);
 		else
 			ret = nvc0_fbcon_fillrect(info, rect);
-		mutex_unlock(&dev_priv->channel->mutex);
+		mutex_unlock(&drm->client.mutex);
 	}
 
 	if (ret == 0)
@@ -84,9 +95,9 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 static void
 nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -94,15 +105,15 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 
 	ret = -ENODEV;
 	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	    mutex_trylock(&dev_priv->channel->mutex)) {
-		if (dev_priv->card_type < NV_50)
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
 			ret = nv04_fbcon_copyarea(info, image);
 		else
-		if (dev_priv->card_type < NV_C0)
+		if (device->card_type < NV_C0)
 			ret = nv50_fbcon_copyarea(info, image);
 		else
 			ret = nvc0_fbcon_copyarea(info, image);
-		mutex_unlock(&dev_priv->channel->mutex);
+		mutex_unlock(&drm->client.mutex);
 	}
 
 	if (ret == 0)
@@ -116,9 +127,9 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 static void
 nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -126,15 +137,15 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 
 	ret = -ENODEV;
 	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	    mutex_trylock(&dev_priv->channel->mutex)) {
-		if (dev_priv->card_type < NV_50)
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
 			ret = nv04_fbcon_imageblit(info, image);
 		else
-		if (dev_priv->card_type < NV_C0)
+		if (device->card_type < NV_C0)
 			ret = nv50_fbcon_imageblit(info, image);
 		else
 			ret = nvc0_fbcon_imageblit(info, image);
-		mutex_unlock(&dev_priv->channel->mutex);
+		mutex_unlock(&drm->client.mutex);
 	}
 
 	if (ret == 0)
@@ -148,10 +159,9 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	if (!chan || !chan->accel_done || in_interrupt() ||
@@ -159,11 +169,11 @@ nouveau_fbcon_sync(struct fb_info *info)
 	    info->flags & FBINFO_HWACCEL_DISABLED)
 		return 0;
 
-	if (!mutex_trylock(&chan->mutex))
+	if (!mutex_trylock(&drm->client.mutex))
 		return 0;
 
 	ret = nouveau_channel_idle(chan);
-	mutex_unlock(&chan->mutex);
+	mutex_unlock(&drm->client.mutex);
 	if (ret) {
 		nouveau_fbcon_gpu_lockup(info);
 		return 0;
@@ -223,9 +233,9 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 }
 
 static void
-nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 {
-	struct fb_info *info = nfbdev->helper.fbdev;
+	struct fb_info *info = fbcon->helper.fbdev;
 	struct fb_fillrect rect;
 
 	/* Clear the entire fbcon.  The drm will program every connector
@@ -241,11 +251,12 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 }
 
 static int
-nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
 		     struct drm_fb_helper_surface_size *sizes)
 {
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = fbcon->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct nouveau_framebuffer *nouveau_fb;
@@ -253,7 +264,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	struct nouveau_bo *nvbo;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct pci_dev *pdev = dev->pdev;
-	struct device *device = &pdev->dev;
 	int size, ret;
 
 	mode_cmd.width = sizes->surface_width;
@@ -271,37 +281,38 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
 			      0, 0x0000, &nvbo);
 	if (ret) {
-		NV_ERROR(dev, "failed to allocate framebuffer\n");
+		NV_ERROR(drm, "failed to allocate framebuffer\n");
 		goto out;
 	}
 
 	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 	if (ret) {
-		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
+		NV_ERROR(drm, "failed to pin fb: %d\n", ret);
 		nouveau_bo_ref(NULL, &nvbo);
 		goto out;
 	}
 
 	ret = nouveau_bo_map(nvbo);
 	if (ret) {
-		NV_ERROR(dev, "failed to map fb: %d\n", ret);
+		NV_ERROR(drm, "failed to map fb: %d\n", ret);
 		nouveau_bo_unpin(nvbo);
 		nouveau_bo_ref(NULL, &nvbo);
 		goto out;
 	}
 
-	chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
-	if (chan && dev_priv->card_type >= NV_50) {
-		ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
+	chan = nouveau_nofbaccel ? NULL : drm->channel;
+	if (chan && device->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm,
+					&fbcon->nouveau_fb.vma);
 		if (ret) {
-			NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
+			NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
 			chan = NULL;
 		}
 	}
 
 	mutex_lock(&dev->struct_mutex);
 
-	info = framebuffer_alloc(0, device);
+	info = framebuffer_alloc(0, &pdev->dev);
 	if (!info) {
 		ret = -ENOMEM;
 		goto out_unref;
@@ -313,16 +324,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 		goto out_unref;
 	}
 
-	info->par = nfbdev;
+	info->par = fbcon;
 
-	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+	nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
 
-	nouveau_fb = &nfbdev->nouveau_fb;
+	nouveau_fb = &fbcon->nouveau_fb;
 	fb = &nouveau_fb->base;
 
 	/* setup helper */
-	nfbdev->helper.fb = fb;
-	nfbdev->helper.fbdev = info;
+	fbcon->helper.fb = fb;
+	fbcon->helper.fbdev = info;
 
 	strcpy(info->fix.id, "nouveaufb");
 	if (nouveau_nofbaccel)
@@ -341,25 +352,18 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	info->screen_size = size;
 
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
-	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
-
-	/* Set aperture base/size for vesafb takeover */
-	info->apertures = dev_priv->apertures;
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
+	drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
 	mutex_unlock(&dev->struct_mutex);
 
-	if (dev_priv->channel && !nouveau_nofbaccel) {
+	if (chan) {
 		ret = -ENODEV;
-		if (dev_priv->card_type < NV_50)
+		if (device->card_type < NV_50)
 			ret = nv04_fbcon_accel_init(info);
 		else
-		if (dev_priv->card_type < NV_C0)
+		if (device->card_type < NV_C0)
 			ret = nv50_fbcon_accel_init(info);
 		else
 			ret = nvc0_fbcon_accel_init(info);
@@ -368,13 +372,12 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 			info->fbops = &nouveau_fbcon_ops;
 	}
 
-	nouveau_fbcon_zfill(dev, nfbdev);
+	nouveau_fbcon_zfill(dev, fbcon);
 
 	/* To allow resizing without swapping buffers */
-	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
-						nouveau_fb->base.width,
-						nouveau_fb->base.height,
-						nvbo->bo.offset, nvbo);
+	NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
+		nouveau_fb->base.width, nouveau_fb->base.height,
+		nvbo->bo.offset, nvbo);
 
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
@@ -389,12 +392,12 @@ static int
 nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
 				    struct drm_fb_helper_surface_size *sizes)
 {
-	struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+	struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
 	int new_fb = 0;
 	int ret;
 
 	if (!helper->fb) {
-		ret = nouveau_fbcon_create(nfbdev, sizes);
+		ret = nouveau_fbcon_create(fbcon, sizes);
 		if (ret)
 			return ret;
 		new_fb = 1;
@@ -405,18 +408,18 @@ nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
 void
 nouveau_fbcon_output_poll_changed(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	drm_fb_helper_hotplug_event(&drm->fbcon->helper);
 }
 
 static int
-nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 {
-	struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
+	struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
 	struct fb_info *info;
 
-	if (nfbdev->helper.fbdev) {
-		info = nfbdev->helper.fbdev;
+	if (fbcon->helper.fbdev) {
+		info = fbcon->helper.fbdev;
 		unregister_framebuffer(info);
 		if (info->cmap.len)
 			fb_dealloc_cmap(&info->cmap);
@@ -429,17 +432,17 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}
-	drm_fb_helper_fini(&nfbdev->helper);
+	drm_fb_helper_fini(&fbcon->helper);
 	drm_framebuffer_cleanup(&nouveau_fb->base);
 	return 0;
 }
 
 void nouveau_fbcon_gpu_lockup(struct fb_info *info)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
 
-	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+	NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
 	info->flags |= FBINFO_HWACCEL_DISABLED;
 }
 
@@ -450,74 +453,81 @@ static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
 };
 
 
-int nouveau_fbcon_init(struct drm_device *dev)
+int
+nouveau_fbcon_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fbdev *nfbdev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fbdev *fbcon;
 	int preferred_bpp;
 	int ret;
 
-	nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
-	if (!nfbdev)
+	if (!dev->mode_config.num_crtc)
+		return 0;
+
+	fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+	if (!fbcon)
 		return -ENOMEM;
 
-	nfbdev->dev = dev;
-	dev_priv->nfbdev = nfbdev;
-	nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+	fbcon->dev = dev;
+	drm->fbcon = fbcon;
+	fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
 
-	ret = drm_fb_helper_init(dev, &nfbdev->helper,
+	ret = drm_fb_helper_init(dev, &fbcon->helper,
 				 dev->mode_config.num_crtc, 4);
 	if (ret) {
-		kfree(nfbdev);
+		kfree(fbcon);
 		return ret;
 	}
 
-	drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+	drm_fb_helper_single_add_all_connectors(&fbcon->helper);
 
-	if (dev_priv->vram_size <= 32 * 1024 * 1024)
+	if (pfb->ram.size <= 32 * 1024 * 1024)
 		preferred_bpp = 8;
-	else if (dev_priv->vram_size <= 64 * 1024 * 1024)
+	else
+	if (pfb->ram.size <= 64 * 1024 * 1024)
 		preferred_bpp = 16;
 	else
 		preferred_bpp = 32;
 
-	drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
+	drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
 	return 0;
 }
 
-void nouveau_fbcon_fini(struct drm_device *dev)
+void
+nouveau_fbcon_fini(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (!dev_priv->nfbdev)
+	if (!drm->fbcon)
 		return;
 
-	nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
-	kfree(dev_priv->nfbdev);
-	dev_priv->nfbdev = NULL;
+	nouveau_fbcon_destroy(dev, drm->fbcon);
+	kfree(drm->fbcon);
+	drm->fbcon = NULL;
 }
 
 void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
-	dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+	drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+	drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
 }
 
 void nouveau_fbcon_restore_accel(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
 }
 
 void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	console_lock();
 	if (state == 0)
 		nouveau_fbcon_save_disable_accel(dev);
-	fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+	fb_set_suspend(drm->fbcon->helper.fbdev, state);
 	if (state == 1)
 		nouveau_fbcon_restore_accel(dev);
 	console_unlock();
@@ -525,6 +535,6 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 
 void nouveau_fbcon_zfill_all(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	nouveau_fbcon_zfill(dev, drm->fbcon);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 1f2d27893438..fdfc0c94fbcc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,7 +29,8 @@
 
 #include <drm/drm_fb_helper.h>
 
-#include "nouveau_fb.h"
+#include "nouveau_display.h"
+
 struct nouveau_fbdev {
 	struct drm_fb_helper helper;
 	struct nouveau_framebuffer nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 614df7b958ca..1d049be79f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -29,11 +29,9 @@
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_fence.h"
 
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
@@ -53,16 +51,16 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 void
 nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
 {
+	INIT_LIST_HEAD(&fctx->flip);
 	INIT_LIST_HEAD(&fctx->pending);
 	spin_lock_init(&fctx->lock);
 }
 
-void
+static void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
-	struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+	struct nouveau_fence_priv *priv = chan->drm->fence;
+	struct nouveau_fence_chan *fctx = chan->fence;
 	struct nouveau_fence *fence, *fnext;
 
 	spin_lock(&fctx->lock);
@@ -82,9 +80,8 @@ nouveau_fence_update(struct nouveau_channel *chan)
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
-	struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+	struct nouveau_fence_priv *priv = chan->drm->fence;
+	struct nouveau_fence_chan *fctx = chan->fence;
 	int ret;
 
 	fence->channel  = chan;
@@ -146,19 +143,17 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 int
 nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->dev;
-	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	struct nouveau_fence_priv *priv = chan->drm->fence;
 	struct nouveau_channel *prev;
 	int ret = 0;
 
-	prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+	prev = fence ? fence->channel : NULL;
 	if (prev) {
 		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
 			ret = priv->sync(fence, prev, chan);
 			if (unlikely(ret))
 				ret = nouveau_fence_wait(fence, true, false);
 		}
-		nouveau_channel_put_unlocked(&prev);
 	}
 
 	return ret;
@@ -192,7 +187,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
 	struct nouveau_fence *fence;
 	int ret = 0;
 
-	if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
+	if (unlikely(!chan->fence))
 		return -ENODEV;
 
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 82ba733393ae..bedafd1c9539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,6 +1,8 @@
 #ifndef __NOUVEAU_FENCE_H__
 #define __NOUVEAU_FENCE_H__
 
+struct nouveau_drm;
+
 struct nouveau_fence {
 	struct list_head head;
 	struct kref kref;
@@ -22,31 +24,48 @@ int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int  nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-void nouveau_fence_idle(struct nouveau_channel *);
-void nouveau_fence_update(struct nouveau_channel *);
 
 struct nouveau_fence_chan {
 	struct list_head pending;
+	struct list_head flip;
+
 	spinlock_t lock;
 	u32 sequence;
 };
 
 struct nouveau_fence_priv {
-	struct nouveau_exec_engine engine;
-	int (*emit)(struct nouveau_fence *);
-	int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
-		    struct nouveau_channel *);
-	u32 (*read)(struct nouveau_channel *);
+	void (*dtor)(struct nouveau_drm *);
+	bool (*suspend)(struct nouveau_drm *);
+	void (*resume)(struct nouveau_drm *);
+	int  (*context_new)(struct nouveau_channel *);
+	void (*context_del)(struct nouveau_channel *);
+	int  (*emit)(struct nouveau_fence *);
+	int  (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+		     struct nouveau_channel *);
+	u32  (*read)(struct nouveau_channel *);
 };
 
+#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
+
 void nouveau_fence_context_new(struct nouveau_fence_chan *);
 void nouveau_fence_context_del(struct nouveau_fence_chan *);
 
-int nv04_fence_create(struct drm_device *dev);
+int nv04_fence_create(struct nouveau_drm *);
 int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
 
-int nv10_fence_create(struct drm_device *dev);
-int nv84_fence_create(struct drm_device *dev);
-int nvc0_fence_create(struct drm_device *dev);
+int  nv10_fence_emit(struct nouveau_fence *);
+int  nv17_fence_sync(struct nouveau_fence *, struct nouveau_channel *,
+		     struct nouveau_channel *);
+u32  nv10_fence_read(struct nouveau_channel *);
+void nv10_fence_context_del(struct nouveau_channel *);
+void nv10_fence_destroy(struct nouveau_drm *);
+int  nv10_fence_create(struct nouveau_drm *);
+
+int nv50_fence_create(struct nouveau_drm *);
+int nv84_fence_create(struct nouveau_drm *);
+int nvc0_fence_create(struct nouveau_drm *);
+u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
+
+int nouveau_flip_complete(void *chan);
 
 #endif
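
The fence backend interface gains per-channel context_new/context_del hooks and per-drm dtor/suspend/resume hooks, with nouveau_fence_priv now stashed directly in drm->fence (see the nouveau_fence() macro above). A minimal sketch of what a backend constructor might look like under the new interface follows; the example_* names are placeholders, the emit/sync/read helpers are the nv10/nv17 ones exported above, and suspend/resume are left unset on the assumption that the core treats them as optional.

	static int
	example_fence_context_new(struct nouveau_channel *chan)
	{
		struct nouveau_fence_chan *fctx;

		fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		nouveau_fence_context_new(fctx);
		chan->fence = fctx;
		return 0;
	}

	static void
	example_fence_context_del(struct nouveau_channel *chan)
	{
		struct nouveau_fence_chan *fctx = chan->fence;

		nouveau_fence_context_del(fctx);
		chan->fence = NULL;
		kfree(fctx);
	}

	static void
	example_fence_destroy(struct nouveau_drm *drm)
	{
		kfree(drm->fence);
		drm->fence = NULL;
	}

	int
	example_fence_create(struct nouveau_drm *drm)
	{
		struct nouveau_fence_priv *priv;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->dtor        = example_fence_destroy;
		priv->context_new = example_fence_context_new;
		priv->context_del = example_fence_context_del;
		priv->emit        = nv10_fence_emit;
		priv->sync        = nv17_fence_sync;
		priv->read        = nv10_fence_read;

		drm->fence = priv;
		return 0;
	}
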
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
deleted file mode 100644
index ce99cab2f257..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_fifo.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __NOUVEAU_FIFO_H__
-#define __NOUVEAU_FIFO_H__
-
-struct nouveau_fifo_priv {
-	struct nouveau_exec_engine base;
-	u32 channels;
-};
-
-struct nouveau_fifo_chan {
-};
-
-bool nv04_fifo_cache_pull(struct drm_device *, bool);
-void nv04_fifo_context_del(struct nouveau_channel *, int);
-int  nv04_fifo_fini(struct drm_device *, int, bool);
-int  nv04_fifo_init(struct drm_device *, int);
-void nv04_fifo_isr(struct drm_device *);
-void nv04_fifo_destroy(struct drm_device *, int);
-
-void nv50_fifo_playlist_update(struct drm_device *);
-void nv50_fifo_destroy(struct drm_device *, int);
-void nv50_fifo_tlb_flush(struct drm_device *, int);
-
-int  nv04_fifo_create(struct drm_device *);
-int  nv10_fifo_create(struct drm_device *);
-int  nv17_fifo_create(struct drm_device *);
-int  nv40_fifo_create(struct drm_device *);
-int  nv50_fifo_create(struct drm_device *);
-int  nv84_fifo_create(struct drm_device *);
-int  nvc0_fifo_create(struct drm_device *);
-int  nve0_fifo_create(struct drm_device *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8461a4f5710f..5e2f52158f19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,15 +23,18 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+
 #include <linux/dma-buf.h>
-#include <drm/drmP.h>
 
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
+#include <subdev/fb.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
+#include "nouveau_abi16.h"
 
-#define nouveau_gem_pushbuf_sync(chan) 0
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
 
 int
 nouveau_gem_object_new(struct drm_gem_object *gem)
@@ -66,19 +69,19 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 int
 nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_vma *vma;
 	int ret;
 
-	if (!fpriv->vm)
+	if (!cli->base.vm)
 		return 0;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
 	if (ret)
 		return ret;
 
-	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 	if (!vma) {
 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 		if (!vma) {
@@ -86,7 +89,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 			goto out;
 		}
 
-		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
+		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
 		if (ret) {
 			kfree(vma);
 			goto out;
@@ -103,19 +106,19 @@ out:
 void
 nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_vma *vma;
 	int ret;
 
-	if (!fpriv->vm)
+	if (!cli->base.vm)
 		return;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
 	if (ret)
 		return;
 
-	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 	if (vma) {
 		if (--vma->refcount == 0) {
 			nouveau_bo_vma_del(nvbo, vma);
@@ -130,7 +133,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 		uint32_t tile_mode, uint32_t tile_flags,
 		struct nouveau_bo **pnvbo)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
 	u32 flags = 0;
 	int ret;
@@ -154,7 +157,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 	 */
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 			      NOUVEAU_GEM_DOMAIN_GART;
-	if (dev_priv->card_type >= NV_50)
+	if (nv_device(drm->device)->card_type >= NV_50)
 		nvbo->valid_domains &= domain;
 
 	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
@@ -172,7 +175,7 @@ static int
 nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 		 struct drm_nouveau_gem_info *rep)
 {
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct nouveau_vma *vma;
 
@@ -182,8 +185,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 
 	rep->offset = nvbo->bo.offset;
-	if (fpriv->vm) {
-		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+	if (cli->base.vm) {
+		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 		if (!vma)
 			return -EINVAL;
 
@@ -201,15 +204,16 @@ int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;
+	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
 
-	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
-		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
+	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
+		NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
 	}
 
@@ -311,16 +315,16 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
 	      int nr_buffers, struct validate_op *op)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = chan->drm->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint32_t sequence;
 	int trycnt = 0;
 	int ret, i;
 
-	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
+	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 retry:
 	if (++trycnt > 100000) {
-		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -331,14 +335,14 @@ retry:
 
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
-			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
+			NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
 			validate_fini(op, NULL);
 			return -ENOENT;
 		}
 		nvbo = gem->driver_private;
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
-			NV_ERROR(dev, "multiple instances of buffer %d on "
+			NV_ERROR(drm, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
 			drm_gem_object_unreference_unlocked(gem);
 			validate_fini(op, NULL);
@@ -353,7 +357,7 @@ retry:
 			drm_gem_object_unreference_unlocked(gem);
 			if (unlikely(ret)) {
 				if (ret != -ERESTARTSYS)
-					NV_ERROR(dev, "fail reserve\n");
+					NV_ERROR(drm, "fail reserve\n");
 				return ret;
 			}
 			goto retry;
@@ -372,7 +376,7 @@ retry:
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 			list_add_tail(&nvbo->entry, &op->gart_list);
 		else {
-			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
+			NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
 			list_add_tail(&nvbo->entry, &op->both_list);
 			validate_fini(op, NULL);
@@ -406,10 +410,9 @@ static int
 validate_list(struct nouveau_channel *chan, struct list_head *list,
 	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_drm *drm = chan->drm;
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 				(void __force __user *)(uintptr_t)user_pbbo_ptr;
-	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
 
@@ -418,7 +421,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
 		ret = validate_sync(chan, nvbo);
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail pre-validate sync\n");
+			NV_ERROR(drm, "fail pre-validate sync\n");
 			return ret;
 		}
 
@@ -426,24 +429,24 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail set_domain\n");
+			NV_ERROR(drm, "fail set_domain\n");
 			return ret;
 		}
 
 		ret = nouveau_bo_validate(nvbo, true, false, false);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_ERROR(dev, "fail ttm_validate\n");
+				NV_ERROR(drm, "fail ttm_validate\n");
 			return ret;
 		}
 
 		ret = validate_sync(chan, nvbo);
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail post-validate sync\n");
+			NV_ERROR(drm, "fail post-validate sync\n");
 			return ret;
 		}
 
-		if (dev_priv->card_type < NV_50) {
+		if (nv_device(drm->device)->card_type < NV_50) {
 			if (nvbo->bo.offset == b->presumed.offset &&
 			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -475,7 +478,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     uint64_t user_buffers, int nr_buffers,
 			     struct validate_op *op, int *apply_relocs)
 {
-	struct drm_device *dev = chan->dev;
+	struct nouveau_drm *drm = chan->drm;
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -488,14 +491,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate_init\n");
+			NV_ERROR(drm, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate vram_list\n");
+			NV_ERROR(drm, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -504,7 +507,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate gart_list\n");
+			NV_ERROR(drm, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -513,7 +516,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate both_list\n");
+			NV_ERROR(drm, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -546,6 +549,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 				struct drm_nouveau_gem_pushbuf *req,
 				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 	int ret = 0;
 	unsigned i;
@@ -561,7 +565,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 		uint32_t data;
 
 		if (unlikely(r->bo_index > req->nr_buffers)) {
-			NV_ERROR(dev, "reloc bo index invalid\n");
+			NV_ERROR(drm, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -571,7 +575,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 			continue;
 
 		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
-			NV_ERROR(dev, "reloc container bo index invalid\n");
+			NV_ERROR(drm, "reloc container bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -579,7 +583,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 
 		if (unlikely(r->reloc_bo_offset + 4 >
 			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
-			NV_ERROR(dev, "reloc outside of bo\n");
+			NV_ERROR(drm, "reloc outside of bo\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -588,7 +592,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
 					  &nvbo->kmap);
 			if (ret) {
-				NV_ERROR(dev, "failed kmap for reloc\n");
+				NV_ERROR(drm, "failed kmap for reloc\n");
 				break;
 			}
 			nvbo->validate_mapped = true;
@@ -613,7 +617,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
-			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+			NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
 			break;
 		}
 
@@ -628,62 +632,67 @@ int
 nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *temp;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_nouveau_gem_pushbuf *req = data;
 	struct drm_nouveau_gem_pushbuf_push *push;
 	struct drm_nouveau_gem_pushbuf_bo *bo;
-	struct nouveau_channel *chan;
+	struct nouveau_channel *chan = NULL;
 	struct validate_op op;
 	struct nouveau_fence *fence = NULL;
 	int i, j, ret = 0, do_reloc = 0;
 
-	chan = nouveau_channel_get(file_priv, req->channel);
-	if (IS_ERR(chan))
-		return PTR_ERR(chan);
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
+	list_for_each_entry(temp, &abi16->channels, head) {
+		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
+			chan = temp->chan;
+			break;
+		}
+	}
 
-	req->vram_available = dev_priv->fb_aper_free;
-	req->gart_available = dev_priv->gart_info.aper_free;
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
+
+	req->vram_available = drm->gem.vram_available;
+	req->gart_available = drm->gem.gart_available;
 	if (unlikely(req->nr_push == 0))
 		goto out_next;
 
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
-		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+		NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
-		nouveau_channel_put(&chan);
-		return -EINVAL;
+		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
-		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+		NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
-		nouveau_channel_put(&chan);
-		return -EINVAL;
+		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
-		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+		NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
-		nouveau_channel_put(&chan);
-		return -EINVAL;
+		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
-	if (IS_ERR(push)) {
-		nouveau_channel_put(&chan);
-		return PTR_ERR(push);
-	}
+	if (IS_ERR(push))
+		return nouveau_abi16_put(abi16, PTR_ERR(push));
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
 		kfree(push);
-		nouveau_channel_put(&chan);
-		return PTR_ERR(bo);
+		return nouveau_abi16_put(abi16, PTR_ERR(bo));
 	}
 
 	/* Ensure all push buffers are on validate list */
 	for (i = 0; i < req->nr_push; i++) {
 		if (push[i].bo_index >= req->nr_buffers) {
-			NV_ERROR(dev, "push %d buffer not in list\n", i);
+			NV_ERROR(drm, "push %d buffer not in list\n", i);
 			ret = -EINVAL;
 			goto out_prevalid;
 		}
@@ -694,7 +703,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(dev, "validate: %d\n", ret);
+			NV_ERROR(drm, "validate: %d\n", ret);
 		goto out_prevalid;
 	}
 
@@ -702,7 +711,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (do_reloc) {
 		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
 		if (ret) {
-			NV_ERROR(dev, "reloc apply: %d\n", ret);
+			NV_ERROR(drm, "reloc apply: %d\n", ret);
 			goto out;
 		}
 	}
@@ -710,7 +719,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (chan->dma.ib_max) {
 		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
-			NV_INFO(dev, "nv50cal_space: %d\n", ret);
+			NV_ERROR(drm, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -722,36 +731,33 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 				      push[i].length);
 		}
 	} else
-	if (dev_priv->chipset >= 0x25) {
+	if (nv_device(drm->device)->chipset >= 0x25) {
 		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
-			NV_ERROR(dev, "cal_space: %d\n", ret);
+			NV_ERROR(drm, "cal_space: %d\n", ret);
 			goto out;
 		}
 
 		for (i = 0; i < req->nr_push; i++) {
 			struct nouveau_bo *nvbo = (void *)(unsigned long)
 				bo[push[i].bo_index].user_priv;
-			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
 
-			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
-					push[i].offset) | 2);
+			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
 			OUT_RING(chan, 0);
 		}
 	} else {
 		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
-			NV_ERROR(dev, "jmp_space: %d\n", ret);
+			NV_ERROR(drm, "jmp_space: %d\n", ret);
 			goto out;
 		}
 
 		for (i = 0; i < req->nr_push; i++) {
 			struct nouveau_bo *nvbo = (void *)(unsigned long)
 				bo[push[i].bo_index].user_priv;
-			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
 			uint32_t cmd;
 
-			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
 			cmd |= 0x20000000;
 			if (unlikely(cmd != req->suffix0)) {
 				if (!nvbo->kmap.virtual) {
@@ -770,8 +776,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 						push[i].length - 8) / 4, cmd);
 			}
 
-			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
-					push[i].offset) | 0x20000000);
+			OUT_RING(chan, 0x20000000 |
+				      (nvbo->bo.offset + push[i].offset));
 			OUT_RING(chan, 0);
 			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
 				OUT_RING(chan, 0);
@@ -780,7 +786,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 	ret = nouveau_fence_new(chan, &fence);
 	if (ret) {
-		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+		NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		goto out;
 	}
@@ -798,17 +804,16 @@ out_next:
 		req->suffix0 = 0x00000000;
 		req->suffix1 = 0x00000000;
 	} else
-	if (dev_priv->chipset >= 0x25) {
+	if (nv_device(drm->device)->chipset >= 0x25) {
 		req->suffix0 = 0x00020000;
 		req->suffix1 = 0x00000000;
 	} else {
 		req->suffix0 = 0x20000000 |
-			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
+			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
 		req->suffix1 = 0x00000000;
 	}
 
-	nouveau_channel_put(&chan);
-	return ret;
+	return nouveau_abi16_put(abi16, ret);
 }
 
 static inline uint32_t
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
new file mode 100644
index 000000000000..5c1049236d22
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -0,0 +1,43 @@
+#ifndef __NOUVEAU_GEM_H__
+#define __NOUVEAU_GEM_H__
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_bo.h"
+
+#define nouveau_bo_tile_layout(nvbo)				\
+	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
+
+static inline struct nouveau_bo *
+nouveau_gem_object(struct drm_gem_object *gem)
+{
+	return gem ? gem->driver_private : NULL;
+}
+
+/* nouveau_gem.c */
+extern int nouveau_gem_new(struct drm_device *, int size, int align,
+			   uint32_t domain, uint32_t tile_mode,
+			   uint32_t tile_flags, struct nouveau_bo **);
+extern int nouveau_gem_object_new(struct drm_gem_object *);
+extern void nouveau_gem_object_del(struct drm_gem_object *);
+extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
+extern void nouveau_gem_object_close(struct drm_gem_object *,
+				     struct drm_file *);
+extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
+				 struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
+				     struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
+				      struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
+				      struct drm_file *);
+extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
+				  struct drm_file *);
+
+extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+				struct drm_gem_object *obj, int flags);
+extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
deleted file mode 100644
index ded74e555e5f..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_i2c.h"
-#include "nouveau_gpio.h"
-
-static u8 *
-dcb_gpio_table(struct drm_device *dev)
-{
-	u8 *dcb = dcb_table(dev);
-	if (dcb) {
-		if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
-			return ROMPTR(dev, dcb[0x0a]);
-		if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
-			return ROMPTR(dev, dcb[-15]);
-	}
-	return NULL;
-}
-
-static u8 *
-dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
-{
-	u8 *table = dcb_gpio_table(dev);
-	if (table) {
-		*version = table[0];
-		if (*version < 0x30 && ent < table[2])
-			return table + 3 + (ent * table[1]);
-		else if (ent < table[2])
-			return table + table[1] + (ent * table[3]);
-	}
-	return NULL;
-}
-
-int
-nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
-	return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
-}
-
-int
-nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
-	return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
-}
-
-int
-nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
-		  struct gpio_func *gpio)
-{
-	u8 *table, *entry, version;
-	int i = -1;
-
-	if (line == 0xff && func == 0xff)
-		return -EINVAL;
-
-	while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
-		if (version < 0x40) {
-			u16 data = ROM16(entry[0]);
-			*gpio = (struct gpio_func) {
-				.line = (data & 0x001f) >> 0,
-				.func = (data & 0x07e0) >> 5,
-				.log[0] = (data & 0x1800) >> 11,
-				.log[1] = (data & 0x6000) >> 13,
-			};
-		} else
-		if (version < 0x41) {
-			*gpio = (struct gpio_func) {
-				.line = entry[0] & 0x1f,
-				.func = entry[1],
-				.log[0] = (entry[3] & 0x18) >> 3,
-				.log[1] = (entry[3] & 0x60) >> 5,
-			};
-		} else {
-			*gpio = (struct gpio_func) {
-				.line = entry[0] & 0x3f,
-				.func = entry[1],
-				.log[0] = (entry[4] & 0x30) >> 4,
-				.log[1] = (entry[4] & 0xc0) >> 6,
-			};
-		}
-
-		if ((line == 0xff || line == gpio->line) &&
-		    (func == 0xff || func == gpio->func))
-			return 0;
-	}
-
-	/* DCB 2.2, fixed TVDAC GPIO data */
-	if ((table = dcb_table(dev)) && table[0] >= 0x22) {
-		if (func == DCB_GPIO_TVDAC0) {
-			*gpio = (struct gpio_func) {
-				.func = DCB_GPIO_TVDAC0,
-				.line = table[-4] >> 4,
-				.log[0] = !!(table[-5] & 2),
-				.log[1] =  !(table[-5] & 2),
-			};
-			return 0;
-		}
-	}
-
-	/* Apple iMac G4 NV18 */
-	if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
-		if (func == DCB_GPIO_TVDAC0) {
-			*gpio = (struct gpio_func) {
-				.func = DCB_GPIO_TVDAC0,
-				.line = 4,
-				.log[0] = 0,
-				.log[1] = 1,
-			};
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
-int
-nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
-{
-	struct gpio_func gpio;
-	int ret;
-
-	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
-	if (ret == 0) {
-		int dir = !!(gpio.log[state] & 0x02);
-		int out = !!(gpio.log[state] & 0x01);
-		ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
-	}
-
-	return ret;
-}
-
-int
-nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
-{
-	struct gpio_func gpio;
-	int ret;
-
-	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
-	if (ret == 0) {
-		ret = nouveau_gpio_sense(dev, idx, gpio.line);
-		if (ret >= 0)
-			ret = (ret == (gpio.log[1] & 1));
-	}
-
-	return ret;
-}
-
-int
-nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	struct gpio_func gpio;
-	int ret;
-
-	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
-	if (ret == 0) {
-		if (idx == 0 && pgpio->irq_enable)
-			pgpio->irq_enable(dev, gpio.line, on);
-		else
-			ret = -ENODEV;
-	}
-
-	return ret;
-}
-
-struct gpio_isr {
-	struct drm_device *dev;
-	struct list_head head;
-	struct work_struct work;
-	int idx;
-	struct gpio_func func;
-	void (*handler)(void *, int);
-	void *data;
-	bool inhibit;
-};
-
-static void
-nouveau_gpio_isr_bh(struct work_struct *work)
-{
-	struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
-	struct drm_device *dev = isr->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	unsigned long flags;
-	int state;
-
-	state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
-	if (state >= 0)
-		isr->handler(isr->data, state);
-
-	spin_lock_irqsave(&pgpio->lock, flags);
-	isr->inhibit = false;
-	spin_unlock_irqrestore(&pgpio->lock, flags);
-}
-
-void
-nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	struct gpio_isr *isr;
-
-	if (idx != 0)
-		return;
-
-	spin_lock(&pgpio->lock);
-	list_for_each_entry(isr, &pgpio->isr, head) {
-		if (line_mask & (1 << isr->func.line)) {
-			if (isr->inhibit)
-				continue;
-			isr->inhibit = true;
-			schedule_work(&isr->work);
-		}
-	}
-	spin_unlock(&pgpio->lock);
-}
-
-int
-nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
-		     void (*handler)(void *, int), void *data)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	struct gpio_isr *isr;
-	unsigned long flags;
-	int ret;
-
-	isr = kzalloc(sizeof(*isr), GFP_KERNEL);
-	if (!isr)
-		return -ENOMEM;
-
-	ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
-	if (ret) {
-		kfree(isr);
-		return ret;
-	}
-
-	INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
-	isr->dev = dev;
-	isr->handler = handler;
-	isr->data = data;
-	isr->idx = idx;
-
-	spin_lock_irqsave(&pgpio->lock, flags);
-	list_add(&isr->head, &pgpio->isr);
-	spin_unlock_irqrestore(&pgpio->lock, flags);
-	return 0;
-}
-
-void
-nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
-		     void (*handler)(void *, int), void *data)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	struct gpio_isr *isr, *tmp;
-	struct gpio_func func;
-	unsigned long flags;
-	LIST_HEAD(tofree);
-	int ret;
-
-	ret = nouveau_gpio_find(dev, idx, tag, line, &func);
-	if (ret == 0) {
-		spin_lock_irqsave(&pgpio->lock, flags);
-		list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
-			if (memcmp(&isr->func, &func, sizeof(func)) ||
-			    isr->idx != idx ||
-			    isr->handler != handler || isr->data != data)
-				continue;
-			list_move(&isr->head, &tofree);
-		}
-		spin_unlock_irqrestore(&pgpio->lock, flags);
-
-		list_for_each_entry_safe(isr, tmp, &tofree, head) {
-			flush_work(&isr->work);
-			kfree(isr);
-		}
-	}
-}
-
-int
-nouveau_gpio_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
-	INIT_LIST_HEAD(&pgpio->isr);
-	spin_lock_init(&pgpio->lock);
-
-	return nouveau_gpio_init(dev);
-}
-
-void
-nouveau_gpio_destroy(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
-	nouveau_gpio_fini(dev);
-	BUG_ON(!list_empty(&pgpio->isr));
-}
-
-int
-nouveau_gpio_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	int ret = 0;
-
-	if (pgpio->init)
-		ret = pgpio->init(dev);
-
-	return ret;
-}
-
-void
-nouveau_gpio_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
-	if (pgpio->fini)
-		pgpio->fini(dev);
-}
-
-void
-nouveau_gpio_reset(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u8 *entry, version;
-	int ent = -1;
-
-	while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
-		u8 func = 0xff, line, defs, unk0, unk1;
-		if (version >= 0x41) {
-			defs = !!(entry[0] & 0x80);
-			line = entry[0] & 0x3f;
-			func = entry[1];
-			unk0 = entry[2];
-			unk1 = entry[3] & 0x1f;
-		} else
-		if (version >= 0x40) {
-			line = entry[0] & 0x1f;
-			func = entry[1];
-			defs = !!(entry[3] & 0x01);
-			unk0 = !!(entry[3] & 0x02);
-			unk1 = !!(entry[3] & 0x04);
-		} else {
-			break;
-		}
-
-		if (func == 0xff)
-			continue;
-
-		nouveau_gpio_func_set(dev, func, defs);
-
-		if (dev_priv->card_type >= NV_D0) {
-			nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
-			if (unk1--)
-				nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
-		} else
-		if (dev_priv->card_type >= NV_50) {
-			static const u32 regs[] = { 0xe100, 0xe28c };
-			u32 val = (unk1 << 16) | unk0;
-			u32 reg = regs[line >> 4]; line &= 0x0f;
-
-			nv_mask(dev, reg, 0x00010001 << line, val << line);
-		}
-	}
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
deleted file mode 100644
index 64c5cb077ace..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NOUVEAU_GPIO_H__
-#define __NOUVEAU_GPIO_H__
-
-struct gpio_func {
-	u8 func;
-	u8 line;
-	u8 log[2];
-};
-
-/* nouveau_gpio.c */
-int  nouveau_gpio_create(struct drm_device *);
-void nouveau_gpio_destroy(struct drm_device *);
-int  nouveau_gpio_init(struct drm_device *);
-void nouveau_gpio_fini(struct drm_device *);
-void nouveau_gpio_reset(struct drm_device *);
-int  nouveau_gpio_drive(struct drm_device *, int idx, int line,
-			int dir, int out);
-int  nouveau_gpio_sense(struct drm_device *, int idx, int line);
-int  nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
-		       struct gpio_func *);
-int  nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
-int  nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
-int  nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
-void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
-int  nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
-			  void (*)(void *, int state), void *data);
-void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
-			  void (*)(void *, int state), void *data);
-
-static inline bool
-nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
-{
-	struct gpio_func func;
-	return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
-}
-
-static inline int
-nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
-{
-	return nouveau_gpio_set(dev, 0, tag, 0xff, state);
-}
-
-static inline int
-nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
-{
-	return nouveau_gpio_get(dev, 0, tag, 0xff);
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
deleted file mode 100644
index 1af7a39e0350..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
+++ /dev/null
@@ -1,807 +0,0 @@
-/*
- * Copyright (C) 2006 Ben Skeggs.
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/*
- * Authors:
- *   Ben Skeggs <darktama@iinet.net.au>
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
-#include "nouveau_vm.h"
-
-struct nouveau_gpuobj_method {
-	struct list_head head;
-	u32 mthd;
-	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
-};
-
-struct nouveau_gpuobj_class {
-	struct list_head head;
-	struct list_head methods;
-	u32 id;
-	u32 engine;
-};
-
-int
-nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj_class *oc;
-
-	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
-	if (!oc)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&oc->methods);
-	oc->id = class;
-	oc->engine = engine;
-	list_add(&oc->head, &dev_priv->classes);
-	return 0;
-}
-
-int
-nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
-			int (*exec)(struct nouveau_channel *, u32, u32, u32))
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj_method *om;
-	struct nouveau_gpuobj_class *oc;
-
-	list_for_each_entry(oc, &dev_priv->classes, head) {
-		if (oc->id == class)
-			goto found;
-	}
-
-	return -EINVAL;
-
-found:
-	om = kzalloc(sizeof(*om), GFP_KERNEL);
-	if (!om)
-		return -ENOMEM;
-
-	om->mthd = mthd;
-	om->exec = exec;
-	list_add(&om->head, &oc->methods);
-	return 0;
-}
-
-int
-nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
-			 u32 class, u32 mthd, u32 data)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_gpuobj_method *om;
-	struct nouveau_gpuobj_class *oc;
-
-	list_for_each_entry(oc, &dev_priv->classes, head) {
-		if (oc->id != class)
-			continue;
-
-		list_for_each_entry(om, &oc->methods, head) {
-			if (om->mthd == mthd)
-				return om->exec(chan, class, mthd, data);
-		}
-	}
-
-	return -ENOENT;
-}
-
-int
-nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
-			  u32 class, u32 mthd, u32 data)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct nouveau_channel *chan = NULL;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (chid >= 0 && chid < pfifo->channels)
-		chan = dev_priv->channels.ptr[chid];
-	if (chan)
-		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return ret;
-}
-
-int
-nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
-		   uint32_t size, int align, uint32_t flags,
-		   struct nouveau_gpuobj **gpuobj_ret)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	struct nouveau_gpuobj *gpuobj;
-	struct drm_mm_node *ramin = NULL;
-	int ret, i;
-
-	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
-		 chan ? chan->id : -1, size, align, flags);
-
-	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
-	if (!gpuobj)
-		return -ENOMEM;
-	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
-	gpuobj->dev = dev;
-	gpuobj->flags = flags;
-	kref_init(&gpuobj->refcount);
-	gpuobj->size = size;
-
-	spin_lock(&dev_priv->ramin_lock);
-	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
-	spin_unlock(&dev_priv->ramin_lock);
-
-	if (!(flags & NVOBJ_FLAG_VM) && chan) {
-		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
-		if (ramin)
-			ramin = drm_mm_get_block(ramin, size, align);
-		if (!ramin) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return -ENOMEM;
-		}
-
-		gpuobj->pinst = chan->ramin->pinst;
-		if (gpuobj->pinst != ~0)
-			gpuobj->pinst += ramin->start;
-
-		gpuobj->cinst = ramin->start;
-		gpuobj->vinst = ramin->start + chan->ramin->vinst;
-		gpuobj->node  = ramin;
-	} else {
-		ret = instmem->get(gpuobj, chan, size, align);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
-
-		ret = -ENOSYS;
-		if (!(flags & NVOBJ_FLAG_DONT_MAP))
-			ret = instmem->map(gpuobj);
-		if (ret)
-			gpuobj->pinst = ~0;
-
-		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
-	}
-
-	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0);
-		instmem->flush(dev);
-	}
-
-
-	*gpuobj_ret = gpuobj;
-	return 0;
-}
-
-int
-nouveau_gpuobj_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	NV_DEBUG(dev, "\n");
-
-	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
-	INIT_LIST_HEAD(&dev_priv->classes);
-	spin_lock_init(&dev_priv->ramin_lock);
-	dev_priv->ramin_base = ~0;
-
-	return 0;
-}
-
-void
-nouveau_gpuobj_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj_method *om, *tm;
-	struct nouveau_gpuobj_class *oc, *tc;
-
-	NV_DEBUG(dev, "\n");
-
-	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
-		list_for_each_entry_safe(om, tm, &oc->methods, head) {
-			list_del(&om->head);
-			kfree(om);
-		}
-		list_del(&oc->head);
-		kfree(oc);
-	}
-
-	WARN_ON(!list_empty(&dev_priv->gpuobj_list));
-}
-
-
-static void
-nouveau_gpuobj_del(struct kref *ref)
-{
-	struct nouveau_gpuobj *gpuobj =
-		container_of(ref, struct nouveau_gpuobj, refcount);
-	struct drm_device *dev = gpuobj->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	int i;
-
-	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
-
-	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0);
-		instmem->flush(dev);
-	}
-
-	if (gpuobj->dtor)
-		gpuobj->dtor(dev, gpuobj);
-
-	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
-		if (gpuobj->node) {
-			instmem->unmap(gpuobj);
-			instmem->put(gpuobj);
-		}
-	} else {
-		if (gpuobj->node) {
-			spin_lock(&dev_priv->ramin_lock);
-			drm_mm_put_block(gpuobj->node);
-			spin_unlock(&dev_priv->ramin_lock);
-		}
-	}
-
-	spin_lock(&dev_priv->ramin_lock);
-	list_del(&gpuobj->list);
-	spin_unlock(&dev_priv->ramin_lock);
-
-	kfree(gpuobj);
-}
-
-void
-nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
-{
-	if (ref)
-		kref_get(&ref->refcount);
-
-	if (*ptr)
-		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);
-
-	*ptr = ref;
-}
-
-int
-nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
-			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = NULL;
-	int i;
-
-	NV_DEBUG(dev,
-		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
-		 pinst, vinst, size, flags);
-
-	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
-	if (!gpuobj)
-		return -ENOMEM;
-	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
-	gpuobj->dev = dev;
-	gpuobj->flags = flags;
-	kref_init(&gpuobj->refcount);
-	gpuobj->size  = size;
-	gpuobj->pinst = pinst;
-	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
-	gpuobj->vinst = vinst;
-
-	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0);
-		dev_priv->engine.instmem.flush(dev);
-	}
-
-	spin_lock(&dev_priv->ramin_lock);
-	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
-	spin_unlock(&dev_priv->ramin_lock);
-	*pgpuobj = gpuobj;
-	return 0;
-}
-
-void
-nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
-		     u64 base, u64 size, int target, int access,
-		     u32 type, u32 comp)
-{
-	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	u32 flags0;
-
-	flags0  = (comp << 29) | (type << 22) | class;
-	flags0 |= 0x00100000;
-
-	switch (access) {
-	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
-	case NV_MEM_ACCESS_RW:
-	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
-	default:
-		break;
-	}
-
-	switch (target) {
-	case NV_MEM_TARGET_VRAM:
-		flags0 |= 0x00010000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		flags0 |= 0x00020000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		flags0 |= 0x00030000;
-		break;
-	case NV_MEM_TARGET_GART:
-		base += dev_priv->gart_info.aper_base;
-	default:
-		flags0 &= ~0x00100000;
-		break;
-	}
-
-	/* convert to base + limit */
-	size = (base + size) - 1;
-
-	nv_wo32(obj, offset + 0x00, flags0);
-	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
-	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
-	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
-				    upper_32_bits(base));
-	nv_wo32(obj, offset + 0x10, 0x00000000);
-	nv_wo32(obj, offset + 0x14, 0x00000000);
-
-	pinstmem->flush(obj->dev);
-}
-
-int
-nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
-		    int target, int access, u32 type, u32 comp,
-		    struct nouveau_gpuobj **pobj)
-{
-	struct drm_device *dev = chan->dev;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
-	if (ret)
-		return ret;
-
-	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
-			     access, type, comp);
-	return 0;
-}
-
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
-		       u64 size, int access, int target,
-		       struct nouveau_gpuobj **pobj)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj;
-	u32 flags0, flags2;
-	int ret;
-
-	if (dev_priv->card_type >= NV_50) {
-		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
-		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
-
-		return nv50_gpuobj_dma_new(chan, class, base, size,
-					   target, access, type, comp, pobj);
-	}
-
-	if (target == NV_MEM_TARGET_GART) {
-		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
-
-		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
-			if (base == 0) {
-				nouveau_gpuobj_ref(gart, pobj);
-				return 0;
-			}
-
-			base   = nouveau_sgdma_get_physical(dev, base);
-			target = NV_MEM_TARGET_PCI;
-		} else {
-			base += dev_priv->gart_info.aper_base;
-			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
-				target = NV_MEM_TARGET_PCI_NOSNOOP;
-			else
-				target = NV_MEM_TARGET_PCI;
-		}
-	}
-
-	flags0  = class;
-	flags0 |= 0x00003000; /* PT present, PT linear */
-	flags2  = 0;
-
-	switch (target) {
-	case NV_MEM_TARGET_PCI:
-		flags0 |= 0x00020000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		flags0 |= 0x00030000;
-		break;
-	default:
-		break;
-	}
-
-	switch (access) {
-	case NV_MEM_ACCESS_RO:
-		flags0 |= 0x00004000;
-		break;
-	case NV_MEM_ACCESS_WO:
-		flags0 |= 0x00008000;
-	default:
-		flags2 |= 0x00000002;
-		break;
-	}
-
-	flags0 |= (base & 0x00000fff) << 20;
-	flags2 |= (base & 0xfffff000);
-
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, flags0);
-	nv_wo32(obj, 0x04, size - 1);
-	nv_wo32(obj, 0x08, flags2);
-	nv_wo32(obj, 0x0c, flags2);
-
-	obj->engine = NVOBJ_ENGINE_SW;
-	obj->class  = class;
-	*pobj = obj;
-	return 0;
-}
-
-int
-nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj_class *oc;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
-
-	list_for_each_entry(oc, &dev_priv->classes, head) {
-		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];
-
-		if (oc->id != class)
-			continue;
-
-		if (!chan->engctx[oc->engine]) {
-			ret = eng->context_new(chan, oc->engine);
-			if (ret)
-				return ret;
-		}
-
-		return eng->object_new(chan, oc->engine, handle, class);
-	}
-
-	return -EINVAL;
-}
-
-static int
-nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t size;
-	uint32_t base;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	/* Base amount for object storage (4KiB enough?) */
-	size = 0x2000;
-	base = 0;
-
-	if (dev_priv->card_type == NV_50) {
-		/* Various fixed table thingos */
-		size += 0x1400; /* mostly unknown stuff */
-		size += 0x4000; /* vm pd */
-		base  = 0x6000;
-		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
-		size += 0x8000;
-		/* RAMFC */
-		size += 0x1000;
-	}
-
-	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
-	if (ret) {
-		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
-		return ret;
-	}
-
-	ret = drm_mm_init(&chan->ramin_heap, base, size - base);
-	if (ret) {
-		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
-		nouveau_gpuobj_ref(NULL, &chan->ramin);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int
-nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *pgd = NULL;
-	struct nouveau_vm_pgd *vpgd;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
-	if (ret)
-		return ret;
-
-	/* create page directory for this vm if none currently exists,
-	 * will be destroyed automagically when last reference to the
-	 * vm is removed
-	 */
-	if (list_empty(&vm->pgd_list)) {
-		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
-		if (ret)
-			return ret;
-	}
-	nouveau_vm_ref(vm, &chan->vm, pgd);
-	nouveau_gpuobj_ref(NULL, &pgd);
-
-	/* point channel at vm's page directory */
-	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
-	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
-	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
-	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
-	nv_wo32(chan->ramin, 0x020c, 0x000000ff);
-
-	return 0;
-}
-
-int
-nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
-			    uint32_t vram_h, uint32_t tt_h)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
-	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
-	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
-	if (dev_priv->card_type >= NV_C0)
-		return nvc0_gpuobj_channel_init(chan, vm);
-
-	/* Allocate a chunk of memory for per-channel object storage */
-	ret = nouveau_gpuobj_channel_init_pramin(chan);
-	if (ret) {
-		NV_ERROR(dev, "init pramin\n");
-		return ret;
-	}
-
-	/* NV50 VM
-	 *  - Allocate per-channel page-directory
-	 *  - Link with shared channel VM
-	 */
-	if (vm) {
-		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
-		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
-		u32 vm_pinst = chan->ramin->pinst;
-
-		if (vm_pinst != ~0)
-			vm_pinst += pgd_offs;
-
-		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
-					      0, &chan->vm_pd);
-		if (ret)
-			return ret;
-
-		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
-	}
-
-	/* RAMHT */
-	if (dev_priv->card_type < NV_50) {
-		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
-	} else {
-		struct nouveau_gpuobj *ramht = NULL;
-
-		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
-					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
-		if (ret)
-			return ret;
-
-		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
-		nouveau_gpuobj_ref(NULL, &ramht);
-		if (ret)
-			return ret;
-	}
-
-	/* VRAM ctxdma */
-	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VM, &vram);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
-			return ret;
-		}
-	} else {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, dev_priv->fb_available_size,
-					     NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &vram);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
-			return ret;
-		}
-	}
-
-	ret = nouveau_ramht_insert(chan, vram_h, vram);
-	nouveau_gpuobj_ref(NULL, &vram);
-	if (ret) {
-		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
-		return ret;
-	}
-
-	/* TT memory ctxdma */
-	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VM, &tt);
-	} else {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, dev_priv->gart_info.aper_size,
-					     NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_GART, &tt);
-	}
-
-	if (ret) {
-		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
-		return ret;
-	}
-
-	ret = nouveau_ramht_insert(chan, tt_h, tt);
-	nouveau_gpuobj_ref(NULL, &tt);
-	if (ret) {
-		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-void
-nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
-{
-	NV_DEBUG(chan->dev, "ch%d\n", chan->id);
-
-	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-
-	if (drm_mm_initialized(&chan->ramin_heap))
-		drm_mm_takedown(&chan->ramin_heap);
-	nouveau_gpuobj_ref(NULL, &chan->ramin);
-}
-
-int
-nouveau_gpuobj_suspend(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj;
-	int i;
-
-	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
-			continue;
-
-		gpuobj->suspend = vmalloc(gpuobj->size);
-		if (!gpuobj->suspend) {
-			nouveau_gpuobj_resume(dev);
-			return -ENOMEM;
-		}
-
-		for (i = 0; i < gpuobj->size; i += 4)
-			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
-	}
-
-	return 0;
-}
-
-void
-nouveau_gpuobj_resume(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj;
-	int i;
-
-	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->suspend)
-			continue;
-
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
-
-		vfree(gpuobj->suspend);
-		gpuobj->suspend = NULL;
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-}
-
-u32
-nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
-{
-	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
-	struct drm_device *dev = gpuobj->dev;
-	unsigned long flags;
-
-	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
-		u64  ptr = gpuobj->vinst + offset;
-		u32 base = ptr >> 16;
-		u32  val;
-
-		spin_lock_irqsave(&dev_priv->vm_lock, flags);
-		if (dev_priv->ramin_base != base) {
-			dev_priv->ramin_base = base;
-			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
-		}
-		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
-		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
-		return val;
-	}
-
-	return nv_ri32(dev, gpuobj->pinst + offset);
-}
-
-void
-nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
-{
-	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
-	struct drm_device *dev = gpuobj->dev;
-	unsigned long flags;
-
-	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
-		u64  ptr = gpuobj->vinst + offset;
-		u32 base = ptr >> 16;
-
-		spin_lock_irqsave(&dev_priv->vm_lock, flags);
-		if (dev_priv->ramin_base != base) {
-			dev_priv->ramin_base = base;
-			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
-		}
-		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
-		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
-		return;
-	}
-
-	nv_wi32(dev, gpuobj->pinst + offset, val);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
index 1e942cfb9644..2c672cebc889 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -23,7 +23,7 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_connector.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
@@ -31,10 +31,10 @@
 static bool
 hdmi_sor(struct drm_encoder *encoder)
 {
-	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
-	if (dev_priv->chipset <  0xa3 ||
-	    dev_priv->chipset == 0xaa ||
-	    dev_priv->chipset == 0xac)
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	if (nv_device(drm->device)->chipset <  0xa3 ||
+	    nv_device(drm->device)->chipset == 0xaa ||
+	    nv_device(drm->device)->chipset == 0xac)
 		return false;
 	return true;
 }
@@ -52,13 +52,15 @@ hdmi_base(struct drm_encoder *encoder)
 static void
 hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
 {
-	nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
+	nv_wr32(device, hdmi_base(encoder) + reg, val);
 }
 
 static u32
 hdmi_rd32(struct drm_encoder *encoder, u32 reg)
 {
-	return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
+	return nv_rd32(device, hdmi_base(encoder) + reg);
 }
 
 static u32
@@ -73,12 +75,11 @@ static void
 nouveau_audio_disconnect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
 	u32 or = nv_encoder->or * 0x800;
 
-	if (hdmi_sor(encoder)) {
-		nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
-	}
+	if (hdmi_sor(encoder))
+		nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
 }
 
 static void
@@ -86,8 +87,8 @@ nouveau_audio_mode_set(struct drm_encoder *encoder,
 		       struct drm_display_mode *mode)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
 	struct nouveau_connector *nv_connector;
-	struct drm_device *dev = encoder->dev;
 	u32 or = nv_encoder->or * 0x800;
 	int i;
 
@@ -98,16 +99,16 @@ nouveau_audio_mode_set(struct drm_encoder *encoder,
 	}
 
 	if (hdmi_sor(encoder)) {
-		nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
+		nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
 
 		drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
 		if (nv_connector->base.eld[0]) {
 			u8 *eld = nv_connector->base.eld;
 			for (i = 0; i < eld[2] * 4; i++)
-				nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
+				nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
 			for (i = eld[2] * 4; i < 0x60; i++)
-				nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
-			nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
+				nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
+			nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
 		}
 	}
 }
@@ -219,9 +220,9 @@ void
 nouveau_hdmi_mode_set(struct drm_encoder *encoder,
 		      struct drm_display_mode *mode)
 {
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_connector *nv_connector;
-	struct drm_device *dev = encoder->dev;
 	u32 max_ac_packet, rekey;
 
 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
@@ -238,9 +239,9 @@ nouveau_hdmi_mode_set(struct drm_encoder *encoder,
 	hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
 	hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
 
-	nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-	nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-	nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+	nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
 
 	/* value matches nvidia binary driver, and tegra constant */
 	rekey = 56;
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 6eabc2ea0c7d..617a06ffdb46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -23,9 +23,13 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_hw.h"
 
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
 #define CHIPSET_NFORCE 0x01a0
 #define CHIPSET_NFORCE2 0x01f0
 
@@ -82,12 +86,12 @@ NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
 void
 NVSetOwner(struct drm_device *dev, int owner)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (owner == 1)
 		owner *= 3;
 
-	if (dev_priv->chipset == 0x11) {
+	if (nv_device(drm->device)->chipset == 0x11) {
 		/* This might seem stupid, but the blob does it and
 		 * omitting it often locks the system up.
 		 */
@@ -98,7 +102,7 @@ NVSetOwner(struct drm_device *dev, int owner)
 	/* CR44 is always changed on CRTC0 */
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
 
-	if (dev_priv->chipset == 0x11) {	/* set me harder */
+	if (nv_device(drm->device)->chipset == 0x11) {	/* set me harder */
 		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
 		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
 	}
@@ -123,270 +127,6 @@ NVBlankScreen(struct drm_device *dev, int head, bool blank)
 }
 
 /*
- * PLL setting
- */
-
-static int
-powerctrl_1_shift(int chip_version, int reg)
-{
-	int shift = -4;
-
-	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
-		return shift;
-
-	switch (reg) {
-	case NV_RAMDAC_VPLL2:
-		shift += 4;
-	case NV_PRAMDAC_VPLL_COEFF:
-		shift += 4;
-	case NV_PRAMDAC_MPLL_COEFF:
-		shift += 4;
-	case NV_PRAMDAC_NVPLL_COEFF:
-		shift += 4;
-	}
-
-	/*
-	 * the shift for vpll regs is only used for nv3x chips with a single
-	 * stage pll
-	 */
-	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
-			  chip_version == 0x36 || chip_version >= 0x40))
-		shift = -4;
-
-	return shift;
-}
-
-static void
-setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chip_version = dev_priv->vbios.chip_version;
-	uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
-	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
-	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
-	uint32_t saved_powerctrl_1 = 0;
-	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
-
-	if (oldpll == pll)
-		return;	/* already set */
-
-	if (shift_powerctrl_1 >= 0) {
-		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
-		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
-			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
-			1 << shift_powerctrl_1);
-	}
-
-	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
-		/* upclock -- write new post divider first */
-		NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
-	else
-		/* downclock -- write new NM first */
-		NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
-
-	if (chip_version < 0x17 && chip_version != 0x11)
-		/* wait a bit on older chips */
-		msleep(64);
-	NVReadRAMDAC(dev, 0, reg);
-
-	/* then write the other half as well */
-	NVWriteRAMDAC(dev, 0, reg, pll);
-
-	if (shift_powerctrl_1 >= 0)
-		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
-}
-
-static uint32_t
-new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
-{
-	bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
-
-	if (ss)	/* single stage pll mode */
-		ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
-				      NV_RAMDAC_580_VPLL2_ACTIVE;
-	else
-		ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
-				      ~NV_RAMDAC_580_VPLL2_ACTIVE;
-
-	return ramdac580;
-}
-
-static void
-setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
-		       struct nouveau_pll_vals *pv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chip_version = dev_priv->vbios.chip_version;
-	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
-	uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
-	uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
-	uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
-	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
-	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
-	uint32_t oldramdac580 = 0, ramdac580 = 0;
-	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
-	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
-	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
-
-	/* model specific additions to generic pll1 and pll2 set up above */
-	if (nv3035) {
-		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
-		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
-		pll2 = 0;
-	}
-	if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
-		oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
-		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
-		if (oldramdac580 != ramdac580)
-			oldpll1 = ~0;	/* force mismatch */
-		if (single_stage)
-			/* magic value used by nvidia in single stage mode */
-			pll2 |= 0x011f;
-	}
-	if (chip_version > 0x70)
-		/* magic bits set by the blob (but not the bios) on g71-73 */
-		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
-
-	if (oldpll1 == pll1 && oldpll2 == pll2)
-		return;	/* already set */
-
-	if (shift_powerctrl_1 >= 0) {
-		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
-		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
-			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
-			1 << shift_powerctrl_1);
-	}
-
-	if (chip_version >= 0x40) {
-		int shift_c040 = 14;
-
-		switch (reg1) {
-		case NV_PRAMDAC_MPLL_COEFF:
-			shift_c040 += 2;
-		case NV_PRAMDAC_NVPLL_COEFF:
-			shift_c040 += 2;
-		case NV_RAMDAC_VPLL2:
-			shift_c040 += 2;
-		case NV_PRAMDAC_VPLL_COEFF:
-			shift_c040 += 2;
-		}
-
-		savedc040 = nvReadMC(dev, 0xc040);
-		if (shift_c040 != 14)
-			nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
-	}
-
-	if (oldramdac580 != ramdac580)
-		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
-
-	if (!nv3035)
-		NVWriteRAMDAC(dev, 0, reg2, pll2);
-	NVWriteRAMDAC(dev, 0, reg1, pll1);
-
-	if (shift_powerctrl_1 >= 0)
-		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
-	if (chip_version >= 0x40)
-		nvWriteMC(dev, 0xc040, savedc040);
-}
-
-static void
-setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
-		      struct nouveau_pll_vals *pv)
-{
-	/* When setting PLLs, there is a merry game of disabling and enabling
-	 * various bits of hardware during the process. This function is a
-	 * synthesis of six nv4x traces, nearly each card doing a subtly
-	 * different thing. With luck all the necessary bits for each card are
-	 * combined herein. Without luck it deviates from each card's formula
-	 * so as to not work on any :)
-	 */
-
-	uint32_t Preg = NMNMreg - 4;
-	bool mpll = Preg == 0x4020;
-	uint32_t oldPval = nvReadMC(dev, Preg);
-	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
-	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
-			0xc << 28 | pv->log2P << 16;
-	uint32_t saved4600 = 0;
-	/* some cards have different maskc040s */
-	uint32_t maskc040 = ~(3 << 14), savedc040;
-	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
-
-	if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
-		return;
-
-	if (Preg == 0x4000)
-		maskc040 = ~0x333;
-	if (Preg == 0x4058)
-		maskc040 = ~(0xc << 24);
-
-	if (mpll) {
-		struct pll_lims pll_lim;
-		uint8_t Pval2;
-
-		if (get_pll_limits(dev, Preg, &pll_lim))
-			return;
-
-		Pval2 = pv->log2P + pll_lim.log2p_bias;
-		if (Pval2 > pll_lim.max_log2p)
-			Pval2 = pll_lim.max_log2p;
-		Pval |= 1 << 28 | Pval2 << 20;
-
-		saved4600 = nvReadMC(dev, 0x4600);
-		nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
-	}
-	if (single_stage)
-		Pval |= mpll ? 1 << 12 : 1 << 8;
-
-	nvWriteMC(dev, Preg, oldPval | 1 << 28);
-	nvWriteMC(dev, Preg, Pval & ~(4 << 28));
-	if (mpll) {
-		Pval |= 8 << 20;
-		nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
-		nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
-	}
-
-	savedc040 = nvReadMC(dev, 0xc040);
-	nvWriteMC(dev, 0xc040, savedc040 & maskc040);
-
-	nvWriteMC(dev, NMNMreg, NMNM);
-	if (NMNMreg == 0x4024)
-		nvWriteMC(dev, 0x403c, NMNM);
-
-	nvWriteMC(dev, Preg, Pval);
-	if (mpll) {
-		Pval &= ~(8 << 20);
-		nvWriteMC(dev, 0x4020, Pval);
-		nvWriteMC(dev, 0x4038, Pval);
-		nvWriteMC(dev, 0x4600, saved4600);
-	}
-
-	nvWriteMC(dev, 0xc040, savedc040);
-
-	if (mpll) {
-		nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
-		nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
-	}
-}
-
-void
-nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
-		  struct nouveau_pll_vals *pv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int cv = dev_priv->vbios.chip_version;
-
-	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
-	    cv >= 0x40) {
-		if (reg1 > 0x405c)
-			setPLL_double_highregs(dev, reg1, pv);
-		else
-			setPLL_double_lowregs(dev, reg1, pv);
-	} else
-		setPLL_single(dev, reg1, pv);
-}
-
-/*
  * PLL getting
  */
 
@@ -394,7 +134,7 @@ static void
 nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
 		      uint32_t pll2, struct nouveau_pll_vals *pllvals)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	/* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
 
@@ -411,7 +151,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
 		pllvals->NM1 = pll1 & 0xffff;
 		if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
 			pllvals->NM2 = pll2 & 0xffff;
-		else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
+		else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) {
 			pllvals->M1 &= 0xf; /* only 4 bits */
 			if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
 				pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -423,28 +163,30 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
 }
 
 int
-nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
+nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
 		       struct nouveau_pll_vals *pllvals)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0;
-	struct pll_lims pll_lim;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	uint32_t reg1, pll1, pll2 = 0;
+	struct nvbios_pll pll_lim;
 	int ret;
 
-	if (reg1 == 0)
+	ret = nvbios_pll_parse(bios, plltype, &pll_lim);
+	if (ret || !(reg1 = pll_lim.reg))
 		return -ENOENT;
 
-	pll1 = nvReadMC(dev, reg1);
-
+	pll1 = nv_rd32(device, reg1);
 	if (reg1 <= 0x405c)
-		pll2 = nvReadMC(dev, reg1 + 4);
+		pll2 = nv_rd32(device, reg1 + 4);
 	else if (nv_two_reg_pll(dev)) {
 		uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
 
-		pll2 = nvReadMC(dev, reg2);
+		pll2 = nv_rd32(device, reg2);
 	}
 
-	if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+	if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
 		uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
 
 		/* check whether vpll has been forced into single stage mode */
@@ -457,13 +199,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
 	}
 
 	nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
-
-	ret = get_pll_limits(dev, plltype, &pll_lim);
-	if (ret)
-		return ret;
-
 	pllvals->refclk = pll_lim.refclk;
-
 	return 0;
 }
 
@@ -478,7 +214,7 @@ nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
 }
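
nouveau_hw_decode_pll() above recovers the raw N/M/P coefficients, and nouveau_hw_pllvals_to_clk() (its body falls outside this hunk) turns them into a frequency. The following is a minimal standalone sketch of that arithmetic; the struct name, field values and the exact integer ordering are assumptions for illustration, not quotes from the driver.

/* pll_clk_example.c -- illustrative only; mirrors the usual two-stage
 * PLL relation f_out = refclk * (N1/M1) * (N2/M2) / 2^P.  A bypassed
 * second stage behaves as N2/M2 = 1/1 (the "single stage" case above). */
#include <stdint.h>
#include <stdio.h>

struct pll_vals_example {	/* hypothetical stand-in for nouveau_pll_vals */
	int N1, M1, N2, M2, log2P;
	int refclk;		/* kHz */
};

static int pllvals_to_clk_example(const struct pll_vals_example *pv)
{
	int64_t clk = (int64_t)pv->refclk * pv->N1 * pv->N2;

	clk /= pv->M1 * pv->M2;
	return (int)(clk >> pv->log2P);
}

int main(void)
{
	struct pll_vals_example pv = {
		.N1 = 200, .M1 = 13, .N2 = 1, .M2 = 1,
		.log2P = 2, .refclk = 13500,
	};

	printf("vclk ~ %d kHz\n", pllvals_to_clk_example(&pv));
	return 0;
}
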
 
 int
-nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
+nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 {
 	struct nouveau_pll_vals pllvals;
 	int ret;
@@ -517,26 +253,30 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
	 * when such a condition is detected.  only seen on nv11 to date
 	 */
 
-	struct pll_lims pll_lim;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_clock *clk = nouveau_clock(device);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll pll_lim;
 	struct nouveau_pll_vals pv;
-	enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0;
+	enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
 
-	if (get_pll_limits(dev, pll, &pll_lim))
+	if (nvbios_pll_parse(bios, pll, &pll_lim))
 		return;
 	nouveau_hw_get_pllvals(dev, pll, &pv);
 
 	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
 	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
-	    pv.log2P <= pll_lim.max_log2p)
+	    pv.log2P <= pll_lim.max_p)
 		return;
 
-	NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);
+	NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1);
 
 	/* set lowest clock within static limits */
 	pv.M1 = pll_lim.vco1.max_m;
 	pv.N1 = pll_lim.vco1.min_n;
-	pv.log2P = pll_lim.max_usable_log2p;
-	nouveau_hw_setpll(dev, pll_lim.reg, &pv);
+	pv.log2P = pll_lim.max_p_usable;
+	clk->pll_prog(clk, pll_lim.reg, &pv);
 }
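
The "lowest clock within static limits" choice just above follows from the same relation: with f roughly refclk * N / (M * 2^P), frequency is minimised by the smallest legal N1, the largest legal M1 and the largest usable post-divider, which is exactly what the three assignments pick. A quick numeric check with made-up limit values:

#include <stdio.h>

int main(void)
{
	const int refclk = 13500;			/* kHz */
	const int vco1_min_n = 32, vco1_max_m = 13;	/* hypothetical limits */
	const int max_p_usable = 6;

	/* smallest N, largest M, largest P => lowest output frequency */
	printf("lowest vclk ~ %d kHz\n",
	       refclk * vco1_min_n / (vco1_max_m << max_p_usable));
	return 0;
}
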
 
 /*
@@ -547,17 +287,16 @@ static void nouveau_vga_font_io(struct drm_device *dev,
 				void __iomem *iovram,
 				bool save, unsigned plane)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	unsigned i;
 
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
 	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
 	for (i = 0; i < 16384; i++) {
 		if (save) {
-			dev_priv->saved_vga_font[plane][i] =
+			nv04_display(dev)->saved_vga_font[plane][i] =
 					ioread32_native(iovram + i * 4);
 		} else {
-			iowrite32_native(dev_priv->saved_vga_font[plane][i],
+			iowrite32_native(nv04_display(dev)->saved_vga_font[plane][i],
 							iovram + i * 4);
 		}
 	}
@@ -566,6 +305,7 @@ static void nouveau_vga_font_io(struct drm_device *dev,
 void
 nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t misc, gr4, gr5, gr6, seq2, seq4;
 	bool graphicsmode;
 	unsigned plane;
@@ -581,12 +321,12 @@ nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
 	if (graphicsmode) /* graphics mode => framebuffer => no need to save */
 		return;
 
-	NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");
+	NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
 
 	/* map first 64KiB of VRAM, holds VGA fonts etc */
 	iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
 	if (!iovram) {
-		NV_ERROR(dev, "Failed to map VRAM, "
+		NV_ERROR(drm, "Failed to map VRAM, "
 					"cannot save/restore VGA fonts.\n");
 		return;
 	}
@@ -649,25 +389,25 @@ static void
 nv_save_state_ramdac(struct drm_device *dev, int head,
 		     struct nv04_mode_state *state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	int i;
 
-	if (dev_priv->card_type >= NV_10)
+	if (nv_device(drm->device)->card_type >= NV_10)
 		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
 
 	nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
 	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
 	if (nv_two_heads(dev))
 		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
-	if (dev_priv->chipset == 0x11)
+	if (nv_device(drm->device)->chipset == 0x11)
 		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
 
 	regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
 
 	if (nv_gf4_disp_arch(dev))
 		regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
-	if (dev_priv->chipset >= 0x30)
+	if (nv_device(drm->device)->chipset >= 0x30)
 		regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
 
 	regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -709,7 +449,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
 	if (nv_gf4_disp_arch(dev))
 		regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
 
-	if (dev_priv->card_type == NV_40) {
+	if (nv_device(drm->device)->card_type == NV_40) {
 		regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
 		regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
 		regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -724,26 +464,27 @@ static void
 nv_load_state_ramdac(struct drm_device *dev, int head,
 		     struct nv04_mode_state *state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_clock *clk = nouveau_clock(drm->device);
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
 	int i;
 
-	if (dev_priv->card_type >= NV_10)
+	if (nv_device(drm->device)->card_type >= NV_10)
 		NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
 
-	nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
+	clk->pll_prog(clk, pllreg, &regp->pllvals);
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
 	if (nv_two_heads(dev))
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
-	if (dev_priv->chipset == 0x11)
+	if (nv_device(drm->device)->chipset == 0x11)
 		NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
 
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
 
 	if (nv_gf4_disp_arch(dev))
 		NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
-	if (dev_priv->chipset >= 0x30)
+	if (nv_device(drm->device)->chipset >= 0x30)
 		NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
 
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -780,7 +521,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
 	if (nv_gf4_disp_arch(dev))
 		NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
 
-	if (dev_priv->card_type == NV_40) {
+	if (nv_device(drm->device)->card_type == NV_40) {
 		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
 		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
 		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -845,7 +586,7 @@ static void
 nv_save_state_ext(struct drm_device *dev, int head,
 		  struct nv04_mode_state *state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	int i;
 
@@ -861,10 +602,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
 
-	if (dev_priv->card_type >= NV_20)
+	if (nv_device(drm->device)->card_type >= NV_20)
 		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
 
-	if (dev_priv->card_type >= NV_30)
+	if (nv_device(drm->device)->card_type >= NV_30)
 		rd_cio_state(dev, head, regp, 0x9f);
 
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -873,14 +614,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
 
-	if (dev_priv->card_type >= NV_10) {
+	if (nv_device(drm->device)->card_type >= NV_10) {
 		regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
 		regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
 
-		if (dev_priv->card_type >= NV_30)
+		if (nv_device(drm->device)->card_type >= NV_30)
 			regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
 
-		if (dev_priv->card_type == NV_40)
+		if (nv_device(drm->device)->card_type == NV_40)
 			regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
 
 		if (nv_two_heads(dev))
@@ -892,7 +633,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
 
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
-	if (dev_priv->card_type >= NV_10) {
+	if (nv_device(drm->device)->card_type >= NV_10) {
 		rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
 		rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
 		rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -920,12 +661,14 @@ static void
 nv_load_state_ext(struct drm_device *dev, int head,
 		  struct nv04_mode_state *state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	uint32_t reg900;
 	int i;
 
-	if (dev_priv->card_type >= NV_10) {
+	if (nv_device(drm->device)->card_type >= NV_10) {
 		if (nv_two_heads(dev))
 			/* setting ENGINE_CTRL (EC) *must* come before
 			 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -933,24 +676,24 @@ nv_load_state_ext(struct drm_device *dev, int head,
 			 */
 			NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
 
-		nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
-		nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
-		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
-		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
-		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
-		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
-		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
-		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
-		nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+		nv_wr32(device, NV_PVIDEO_STOP, 1);
+		nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
+		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
+		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
+		nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
 		NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
 		NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
 
-		if (dev_priv->card_type >= NV_30)
+		if (nv_device(drm->device)->card_type >= NV_30)
 			NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
 
-		if (dev_priv->card_type == NV_40) {
+		if (nv_device(drm->device)->card_type == NV_40) {
 			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
 
 			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -973,23 +716,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
 
-	if (dev_priv->card_type >= NV_20)
+	if (nv_device(drm->device)->card_type >= NV_20)
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
 
-	if (dev_priv->card_type >= NV_30)
+	if (nv_device(drm->device)->card_type >= NV_30)
 		wr_cio_state(dev, head, regp, 0x9f);
 
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
-	if (dev_priv->card_type == NV_40)
+	if (nv_device(drm->device)->card_type == NV_40)
 		nv_fix_nv40_hw_cursor(dev, head);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
 
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
-	if (dev_priv->card_type >= NV_10) {
+	if (nv_device(drm->device)->card_type >= NV_10) {
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -997,11 +740,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	}
 	/* NV11 and NV20 stop at 0x52. */
 	if (nv_gf4_disp_arch(dev)) {
-		if (dev_priv->card_type == NV_10) {
+		if (nv_device(drm->device)->card_type == NV_10) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
-			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
 		}
 
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
@@ -1024,14 +767,15 @@ static void
 nv_save_state_palette(struct drm_device *dev, int head,
 		      struct nv04_mode_state *state)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
-	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
+	nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
 				NV_PRMDIO_PIXEL_MASK_MASK);
-	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
+	nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
 
 	for (i = 0; i < 768; i++) {
-		state->crtc_reg[head].DAC[i] = nv_rd08(dev,
+		state->crtc_reg[head].DAC[i] = nv_rd08(device,
 				NV_PRMDIO_PALETTE_DATA + head_offset);
 	}
 
@@ -1042,14 +786,15 @@ void
 nouveau_hw_load_state_palette(struct drm_device *dev, int head,
 			      struct nv04_mode_state *state)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
-	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
+	nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
 				NV_PRMDIO_PIXEL_MASK_MASK);
-	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
+	nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
 
 	for (i = 0; i < 768; i++) {
-		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
 				state->crtc_reg[head].DAC[i]);
 	}
 
@@ -1059,9 +804,9 @@ nouveau_hw_load_state_palette(struct drm_device *dev, int head,
 void nouveau_hw_save_state(struct drm_device *dev, int head,
 			   struct nv04_mode_state *state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (dev_priv->chipset == 0x11)
+	if (nv_device(drm->device)->chipset == 0x11)
 		/* NB: no attempt is made to restore the bad pll later on */
 		nouveau_hw_fix_bad_vpll(dev, head);
 	nv_save_state_ramdac(dev, head, state);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
index 06a66bc84a81..7dff1021fab4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -24,7 +24,9 @@
 #define __NOUVEAU_HW_H__
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nv04_display.h"
+
+#include <subdev/bios/pll.h>
 
 #define MASK(field) ( \
 	(0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
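
MASK() leans on the C conditional operator to split a field written as "high:low" into its two bounds: (1 ? 27:24) evaluates to 27 and (0 ? 27:24) to 24, so the expression builds a contiguous mask from bit low up to bit high. A standalone check with a made-up field definition (EXAMPLE_FIELD is not a real register field):

#include <stdio.h>

#define MASK(field) ( \
	(0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))

#define EXAMPLE_FIELD 5:3	/* hypothetical field covering bits 5..3 */

int main(void)
{
	/* (0xffffffff >> (31 - (5 - 3))) << 3 == 0x7 << 3 == 0x38 */
	printf("MASK(EXAMPLE_FIELD) = 0x%08x\n", MASK(EXAMPLE_FIELD));
	return 0;
}
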
@@ -38,12 +40,10 @@ void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
 uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
 void NVSetOwner(struct drm_device *, int owner);
 void NVBlankScreen(struct drm_device *, int head, bool blank);
-void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
-		       struct nouveau_pll_vals *pv);
-int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
+int nouveau_hw_get_pllvals(struct drm_device *, enum nvbios_pll_type plltype,
 			   struct nouveau_pll_vals *pllvals);
 int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
-int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype);
+int nouveau_hw_get_clock(struct drm_device *, enum nvbios_pll_type plltype);
 void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
 void nouveau_hw_save_state(struct drm_device *, int head,
 			   struct nv04_mode_state *state);
@@ -55,115 +55,51 @@ void nouveau_hw_load_state_palette(struct drm_device *, int head,
 /* nouveau_calc.c */
 extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
 			     int *burst, int *lwm);
-extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
-				int clk, struct nouveau_pll_vals *pv);
-
-static inline uint32_t
-nvReadMC(struct drm_device *dev, uint32_t reg)
-{
-	uint32_t val = nv_rd32(dev, reg);
-	NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
-	return val;
-}
-
-static inline void
-nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
-{
-	NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
-	nv_wr32(dev, reg, val);
-}
-
-static inline uint32_t
-nvReadVIDEO(struct drm_device *dev, uint32_t reg)
-{
-	uint32_t val = nv_rd32(dev, reg);
-	NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
-	return val;
-}
-
-static inline void
-nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
-{
-	NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
-	nv_wr32(dev, reg, val);
-}
-
-static inline uint32_t
-nvReadFB(struct drm_device *dev, uint32_t reg)
-{
-	uint32_t val = nv_rd32(dev, reg);
-	NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
-	return val;
-}
-
-static inline void
-nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
-{
-	NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
-	nv_wr32(dev, reg, val);
-}
-
-static inline uint32_t
-nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
-{
-	uint32_t val = nv_rd32(dev, reg);
-	NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
-	return val;
-}
-
-static inline void
-nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
-{
-	NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
-	nv_wr32(dev, reg, val);
-}
 
 static inline uint32_t NVReadCRTC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	uint32_t val;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
-	val = nv_rd32(dev, reg);
-	NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
+	val = nv_rd32(device, reg);
 	return val;
 }
 
 static inline void NVWriteCRTC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	if (head)
 		reg += NV_PCRTC0_SIZE;
-	NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
-	nv_wr32(dev, reg, val);
+	nv_wr32(device, reg, val);
 }
 
 static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	uint32_t val;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
-	val = nv_rd32(dev, reg);
-	NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
-							head, reg, val);
+	val = nv_rd32(device, reg);
 	return val;
 }
 
 static inline void NVWriteRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
-	NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
-							head, reg, val);
-	nv_wr32(dev, reg, val);
+	nv_wr32(device, reg, val);
 }
 
 static inline uint8_t nv_read_tmds(struct drm_device *dev,
 					int or, int dl, uint8_t address)
 {
-	int ramdac = (or & OUTPUT_C) >> 2;
+	int ramdac = (or & DCB_OUTPUT_C) >> 2;
 
 	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
 	NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
@@ -174,7 +110,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
 					int or, int dl, uint8_t address,
 					uint8_t data)
 {
-	int ramdac = (or & OUTPUT_C) >> 2;
+	int ramdac = (or & DCB_OUTPUT_C) >> 2;
 
 	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
 	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
@@ -183,20 +119,18 @@ static inline void nv_write_tmds(struct drm_device *dev,
 static inline void NVWriteVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
-							head, index, value);
-	nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
-	nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+	nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
 }
 
 static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	uint8_t val;
-	nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
-	val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
-	NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
-							head, index, val);
+	nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+	val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
 	return val;
 }
 
@@ -230,75 +164,74 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
 static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t val;
 
 	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
 	 * NVSetOwner for the relevant head to be programmed */
-	if (head && dev_priv->card_type == NV_40)
+	if (head && nv_device(drm->device)->card_type == NV_40)
 		reg += NV_PRMVIO_SIZE;
 
-	val = nv_rd08(dev, reg);
-	NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
+	val = nv_rd08(device, reg);
 	return val;
 }
 
 static inline void NVWritePRMVIO(struct drm_device *dev,
 					int head, uint32_t reg, uint8_t value)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
 	 * NVSetOwner for the relevant head to be programmed */
-	if (head && dev_priv->card_type == NV_40)
+	if (head && nv_device(drm->device)->card_type == NV_40)
 		reg += NV_PRMVIO_SIZE;
 
-	NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n",
-						head, reg, value);
-	nv_wr08(dev, reg, value);
+	nv_wr08(device, reg, value);
 }
 
 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
 {
-	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
-	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
 }
 
 static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 {
-	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
-	return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
 }
 
 static inline void NVWriteVgaAttr(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
 	else
 		index |= 0x20;
 
-	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
-	NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
-							head, index, value);
-	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
-	nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
+	nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+	nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
 }
 
 static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
 					int head, uint8_t index)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	uint8_t val;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
 	else
 		index |= 0x20;
 
-	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
-	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
-	val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
-	NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
-							head, index, val);
+	nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+	val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
 	return val;
 }
 
@@ -325,10 +258,11 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
 static inline bool
 nv_heads_tied(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (dev_priv->chipset == 0x11)
-		return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28));
+	if (nv_device(drm->device)->chipset == 0x11)
+		return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
 
 	return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
 }
@@ -377,13 +311,13 @@ nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
 static inline bool
 NVLockVgaCrtcs(struct drm_device *dev, bool lock)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
 
 	NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
 		       lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
 	/* NV11 has independently lockable extended crtcs, except when tied */
-	if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev))
+	if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev))
 		NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
 			       lock ? NV_CIO_SR_LOCK_VALUE :
 				      NV_CIO_SR_UNLOCK_RW_VALUE);
@@ -398,9 +332,9 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
 
 static inline int nv_cursor_width(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+	return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
 }
 
 static inline void
@@ -418,11 +352,11 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
 static inline void
 nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
 
-	if (dev_priv->card_type == NV_04) {
+	if (nv_device(drm->device)->card_type == NV_04) {
 		/*
 		 * Hilarious, the 24th bit doesn't want to stick to
 		 * PCRTC_START...
@@ -437,9 +371,9 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
 static inline void
 nv_show_cursor(struct drm_device *dev, int head, bool show)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t *curctl1 =
-		&dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
+		&nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
 
 	if (show)
 		*curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
@@ -447,14 +381,14 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
 		*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
 	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
 
-	if (dev_priv->card_type == NV_40)
+	if (nv_device(drm->device)->card_type == NV_40)
 		nv_fix_nv40_hw_cursor(dev, head);
 }
 
 static inline uint32_t
 nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int mask;
 
 	if (bpp == 15)
@@ -463,7 +397,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
 		bpp = 8;
 
 	/* Alignment requirements taken from the Haiku driver */
-	if (dev_priv->card_type == NV_04)
+	if (nv_device(drm->device)->card_type == NV_04)
 		mask = 128 / bpp - 1;
 	else
 		mask = 512 / bpp - 1;
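
nv_pitch_align() (its tail falls outside this hunk) rounds a scanline width up so the pitch meets the alignment noted in the Haiku-derived comment: the mask is counted in pixels, so 512/bpp - 1 makes the byte pitch a multiple of 64 bytes (16 bytes with the NV04 mask). Below is a standalone sketch of that arithmetic, assuming the function finishes with the usual (width + mask) & ~mask round-up; it is an illustration, not the driver code.

#include <stdint.h>
#include <stdio.h>

static uint32_t pitch_align_example(uint32_t width, int bpp, int is_nv04)
{
	int mask;

	if (bpp == 15)		/* treated as 16bpp, as in the code above */
		bpp = 16;
	if (bpp == 24)		/* treated as 8bpp, as in the code above */
		bpp = 8;

	mask = (is_nv04 ? 128 : 512) / bpp - 1;	/* mask counted in pixels */
	return (width + mask) & ~mask;		/* assumed round-up step */
}

int main(void)
{
	/* 1366 pixels at 32bpp, non-NV04: mask = 512/32 - 1 = 15, so the
	 * width rounds up to 1376 pixels, i.e. a 5504-byte pitch. */
	printf("%u\n", (unsigned)pitch_align_example(1366, 32, 0));
	return 0;
}
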
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
deleted file mode 100644
index baf2fa25d077..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/module.h>
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_i2c.h"
-#include "nouveau_hw.h"
-
-static void
-i2c_drive_scl(void *data, int state)
-{
-	struct nouveau_i2c_chan *port = data;
-	if (port->type == 0) {
-		u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
-		if (state) val |= 0x20;
-		else	   val &= 0xdf;
-		NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
-	} else
-	if (port->type == 4) {
-		nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
-	} else
-	if (port->type == 5) {
-		if (state) port->state |= 0x01;
-		else	   port->state &= 0xfe;
-		nv_wr32(port->dev, port->drive, 4 | port->state);
-	}
-}
-
-static void
-i2c_drive_sda(void *data, int state)
-{
-	struct nouveau_i2c_chan *port = data;
-	if (port->type == 0) {
-		u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
-		if (state) val |= 0x10;
-		else	   val &= 0xef;
-		NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
-	} else
-	if (port->type == 4) {
-		nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
-	} else
-	if (port->type == 5) {
-		if (state) port->state |= 0x02;
-		else	   port->state &= 0xfd;
-		nv_wr32(port->dev, port->drive, 4 | port->state);
-	}
-}
-
-static int
-i2c_sense_scl(void *data)
-{
-	struct nouveau_i2c_chan *port = data;
-	struct drm_nouveau_private *dev_priv = port->dev->dev_private;
-	if (port->type == 0) {
-		return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
-	} else
-	if (port->type == 4) {
-		return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
-	} else
-	if (port->type == 5) {
-		if (dev_priv->card_type < NV_D0)
-			return !!(nv_rd32(port->dev, port->sense) & 0x01);
-		else
-			return !!(nv_rd32(port->dev, port->sense) & 0x10);
-	}
-	return 0;
-}
-
-static int
-i2c_sense_sda(void *data)
-{
-	struct nouveau_i2c_chan *port = data;
-	struct drm_nouveau_private *dev_priv = port->dev->dev_private;
-	if (port->type == 0) {
-		return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
-	} else
-	if (port->type == 4) {
-		return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
-	} else
-	if (port->type == 5) {
-		if (dev_priv->card_type < NV_D0)
-			return !!(nv_rd32(port->dev, port->sense) & 0x02);
-		else
-			return !!(nv_rd32(port->dev, port->sense) & 0x20);
-	}
-	return 0;
-}
-
-static const uint32_t nv50_i2c_port[] = {
-	0x00e138, 0x00e150, 0x00e168, 0x00e180,
-	0x00e254, 0x00e274, 0x00e764, 0x00e780,
-	0x00e79c, 0x00e7b8
-};
-
-static u8 *
-i2c_table(struct drm_device *dev, u8 *version)
-{
-	u8 *dcb = dcb_table(dev), *i2c = NULL;
-	if (dcb) {
-		if (dcb[0] >= 0x15)
-			i2c = ROMPTR(dev, dcb[2]);
-		if (dcb[0] >= 0x30)
-			i2c = ROMPTR(dev, dcb[4]);
-	}
-
-	/* early revisions had no version number, use dcb version */
-	if (i2c) {
-		*version = dcb[0];
-		if (*version >= 0x30)
-			*version = i2c[0];
-	}
-
-	return i2c;
-}
-
-int
-nouveau_i2c_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct nouveau_i2c_chan *port;
-	u8 version = 0x00, entries, recordlen;
-	u8 *i2c, *entry, legacy[2][4] = {};
-	int ret, i;
-
-	INIT_LIST_HEAD(&dev_priv->i2c_ports);
-
-	i2c = i2c_table(dev, &version);
-	if (!i2c) {
-		u8 *bmp = &bios->data[bios->offset];
-		if (bios->type != NVBIOS_BMP)
-			return -ENODEV;
-
-		legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
-		legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
-		legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
-		legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
-
-		/* BMP (from v4.0) has i2c info in the structure, it's in a
-		 * fixed location on earlier VBIOS
-		 */
-		if (bmp[5] < 4)
-			i2c = &bios->data[0x48];
-		else
-			i2c = &bmp[0x36];
-
-		if (i2c[4]) legacy[0][0] = i2c[4];
-		if (i2c[5]) legacy[0][1] = i2c[5];
-		if (i2c[6]) legacy[1][0] = i2c[6];
-		if (i2c[7]) legacy[1][1] = i2c[7];
-	}
-
-	if (version >= 0x30) {
-		entry     = i2c[1] + i2c;
-		entries   = i2c[2];
-		recordlen = i2c[3];
-	} else
-	if (version) {
-		entry     = i2c;
-		entries   = 16;
-		recordlen = 4;
-	} else {
-		entry     = legacy[0];
-		entries   = 2;
-		recordlen = 4;
-	}
-
-	for (i = 0; i < entries; i++, entry += recordlen) {
-		port = kzalloc(sizeof(*port), GFP_KERNEL);
-		if (port == NULL) {
-			nouveau_i2c_fini(dev);
-			return -ENOMEM;
-		}
-
-		port->type = entry[3];
-		if (version < 0x30) {
-			port->type &= 0x07;
-			if (port->type == 0x07)
-				port->type = 0xff;
-		}
-
-		if (port->type == 0xff) {
-			kfree(port);
-			continue;
-		}
-
-		switch (port->type) {
-		case 0: /* NV04:NV50 */
-			port->drive = entry[0];
-			port->sense = entry[1];
-			break;
-		case 4: /* NV4E */
-			port->drive = 0x600800 + entry[1];
-			port->sense = port->drive;
-			break;
-		case 5: /* NV50- */
-			port->drive = entry[0] & 0x0f;
-			if (dev_priv->card_type < NV_D0) {
-				if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
-					break;
-				port->drive = nv50_i2c_port[port->drive];
-				port->sense = port->drive;
-			} else {
-				port->drive = 0x00d014 + (port->drive * 0x20);
-				port->sense = port->drive;
-			}
-			break;
-		case 6: /* NV50- DP AUX */
-			port->drive = entry[0] & 0x0f;
-			port->sense = port->drive;
-			port->adapter.algo = &nouveau_dp_i2c_algo;
-			break;
-		default:
-			break;
-		}
-
-		if (!port->adapter.algo && !port->drive) {
-			NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
-				 i, port->type, port->drive, port->sense);
-			kfree(port);
-			continue;
-		}
-
-		snprintf(port->adapter.name, sizeof(port->adapter.name),
-			 "nouveau-%s-%d", pci_name(dev->pdev), i);
-		port->adapter.owner = THIS_MODULE;
-		port->adapter.dev.parent = &dev->pdev->dev;
-		port->dev = dev;
-		port->index = i;
-		port->dcb = ROM32(entry[0]);
-		i2c_set_adapdata(&port->adapter, i2c);
-
-		if (port->adapter.algo != &nouveau_dp_i2c_algo) {
-			port->adapter.algo_data = &port->bit;
-			port->bit.udelay = 10;
-			port->bit.timeout = usecs_to_jiffies(2200);
-			port->bit.data = port;
-			port->bit.setsda = i2c_drive_sda;
-			port->bit.setscl = i2c_drive_scl;
-			port->bit.getsda = i2c_sense_sda;
-			port->bit.getscl = i2c_sense_scl;
-
-			i2c_drive_scl(port, 0);
-			i2c_drive_sda(port, 1);
-			i2c_drive_scl(port, 1);
-
-			ret = i2c_bit_add_bus(&port->adapter);
-		} else {
-			port->adapter.algo = &nouveau_dp_i2c_algo;
-			ret = i2c_add_adapter(&port->adapter);
-		}
-
-		if (ret) {
-			NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
-			kfree(port);
-			continue;
-		}
-
-		list_add_tail(&port->head, &dev_priv->i2c_ports);
-	}
-
-	return 0;
-}
-
-void
-nouveau_i2c_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_i2c_chan *port, *tmp;
-
-	list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
-		i2c_del_adapter(&port->adapter);
-		kfree(port);
-	}
-}
-
-struct nouveau_i2c_chan *
-nouveau_i2c_find(struct drm_device *dev, u8 index)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_i2c_chan *port;
-
-	if (index == NV_I2C_DEFAULT(0) ||
-	    index == NV_I2C_DEFAULT(1)) {
-		u8 version, *i2c = i2c_table(dev, &version);
-		if (i2c && version >= 0x30) {
-			if (index == NV_I2C_DEFAULT(0))
-				index = (i2c[4] & 0x0f);
-			else
-				index = (i2c[4] & 0xf0) >> 4;
-		} else {
-			index = 2;
-		}
-	}
-
-	list_for_each_entry(port, &dev_priv->i2c_ports, head) {
-		if (port->index == index)
-			break;
-	}
-
-	if (&port->head == &dev_priv->i2c_ports)
-		return NULL;
-
-	if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
-		u32 reg = 0x00e500, val;
-		if (port->type == 6) {
-			reg += port->drive * 0x50;
-			val  = 0x2002;
-		} else {
-			reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
-			val  = 0xe001;
-		}
-
-		/* nfi, but neither auxch or i2c work if it's 1 */
-		nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
-		/* nfi, but switches auxch vs normal i2c */
-		nv_mask(dev, reg + 0x00, 0x0000f003, val);
-	}
-
-	return port;
-}
-
-bool
-nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
-{
-	uint8_t buf[] = { 0 };
-	struct i2c_msg msgs[] = {
-		{
-			.addr = addr,
-			.flags = 0,
-			.len = 1,
-			.buf = buf,
-		},
-		{
-			.addr = addr,
-			.flags = I2C_M_RD,
-			.len = 1,
-			.buf = buf,
-		}
-	};
-
-	return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
-}
-
-int
-nouveau_i2c_identify(struct drm_device *dev, const char *what,
-		     struct i2c_board_info *info,
-		     bool (*match)(struct nouveau_i2c_chan *,
-				   struct i2c_board_info *),
-		     int index)
-{
-	struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
-	int i;
-
-	if (!i2c) {
-		NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
-		return -ENODEV;
-	}
-
-	NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
-	for (i = 0; info[i].addr; i++) {
-		if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
-		    (!match || match(i2c, &info[i]))) {
-			NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
-			return i;
-		}
-	}
-
-	NV_DEBUG(dev, "No devices found.\n");
-	return -ENODEV;
-}
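
For reference, the probe removed above is just a one-byte write of zero followed by a one-byte read, succeeding only if both transfers are ACKed. The same check can be expressed against the userspace i2c-dev interface; this is an illustrative sketch, and the bus path and 0x50 address are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

/* Userspace sketch of the same write-then-read probe; illustrative only. */
static int probe_i2c_addr(int fd, unsigned addr)
{
	unsigned char buf = 0;
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = &buf },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &buf },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	return ioctl(fd, I2C_RDWR, &xfer) == 2;	/* both messages went through */
}

int main(void)
{
	int fd = open("/dev/i2c-0", O_RDWR);	/* placeholder bus */

	if (fd < 0)
		return 1;
	printf("0x50: %s\n", probe_i2c_addr(fd, 0x50) ? "present" : "absent");
	close(fd);
	return 0;
}
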
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index aa3a067c707b..08214bcdcb12 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -35,7 +35,7 @@
 
 #include <drm/drmP.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_ioctl.h"
 
 /**
  * Called whenever a 32-bit process running under a 64-bit kernel
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
new file mode 100644
index 000000000000..ef2b2906d9e6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -0,0 +1,6 @@
+#ifndef __NOUVEAU_IOCTL_H__
+#define __NOUVEAU_IOCTL_H__
+
+long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 6273b7763cd6..9ca8afdb5549 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1,146 +1,86 @@
 /*
- * Copyright (C) 2006 Ben Skeggs.
+ * Copyright 2012 Red Hat Inc.
  *
- * All Rights Reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Authors: Ben Skeggs
  */
 
-/*
- * Authors:
- *   Ben Skeggs <darktama@iinet.net.au>
- */
+#include <subdev/mc.h>
 
-#include <drm/drmP.h>
-#include <drm/nouveau_drm.h>
-#include "nouveau_drv.h"
-#include "nouveau_reg.h"
-#include "nouveau_ramht.h"
-#include "nouveau_util.h"
+#include "nouveau_drm.h"
+#include "nouveau_irq.h"
+#include "nv50_display.h"
 
 void
 nouveau_irq_preinstall(struct drm_device *dev)
 {
-	/* Master disable */
-	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+	nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
 }
 
 int
 nouveau_irq_postinstall(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/* Master enable */
-	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
-	if (dev_priv->msi_enabled)
-		nv_wr08(dev, 0x00088068, 0xff);
-
+	nv_wr32(nouveau_dev(dev), 0x000140, 0x00000001);
 	return 0;
 }
 
 void
 nouveau_irq_uninstall(struct drm_device *dev)
 {
-	/* Master disable */
-	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+	nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
 }
 
 irqreturn_t
 nouveau_irq_handler(DRM_IRQ_ARGS)
 {
-	struct drm_device *dev = (struct drm_device *)arg;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
+	struct drm_device *dev = arg;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_mc *pmc = nouveau_mc(device);
 	u32 stat;
-	int i;
 
-	stat = nv_rd32(dev, NV03_PMC_INTR_0);
+	stat = nv_rd32(device, 0x000100);
 	if (stat == 0 || stat == ~0)
 		return IRQ_NONE;
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	for (i = 0; i < 32 && stat; i++) {
-		if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
-			continue;
+	nv_subdev(pmc)->intr(nv_subdev(pmc));
 
-		dev_priv->irq_handler[i](dev);
-		stat &= ~(1 << i);
+	if (device->card_type >= NV_D0) {
+		if (nv_rd32(device, 0x000100) & 0x04000000)
+			nvd0_display_intr(dev);
+	} else
+	if (device->card_type >= NV_50) {
+		if (nv_rd32(device, 0x000100) & 0x04000000)
+			nv50_display_intr(dev);
 	}
 
-	if (dev_priv->msi_enabled)
-		nv_wr08(dev, 0x00088068, 0xff);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	if (stat && nouveau_ratelimit())
-		NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
 	return IRQ_HANDLED;
 }
 
 int
 nouveau_irq_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
-		ret = pci_enable_msi(dev->pdev);
-		if (ret == 0) {
-			NV_INFO(dev, "enabled MSI\n");
-			dev_priv->msi_enabled = true;
-		}
-	}
-
 	return drm_irq_install(dev);
 }
 
 void
 nouveau_irq_fini(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
 	drm_irq_uninstall(dev);
-	if (dev_priv->msi_enabled)
-		pci_disable_msi(dev->pdev);
-}
-
-void
-nouveau_irq_register(struct drm_device *dev, int status_bit,
-		     void (*handler)(struct drm_device *))
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	dev_priv->irq_handler[status_bit] = handler;
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-}
-
-void
-nouveau_irq_unregister(struct drm_device *dev, int status_bit)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	dev_priv->irq_handler[status_bit] = NULL;
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
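
The register/unregister helpers deleted above, together with the old handler loop, implement a common pattern: one callback slot per PMC status bit, with the top-level handler walking the set bits and clearing each one it dispatches. A standalone model of that dispatch follows; the names and the example bit are illustrative, not taken from the hardware documentation.

#include <stdint.h>
#include <stdio.h>

typedef void (*bit_handler_t)(void *ctx);

static bit_handler_t handlers[32];	/* one slot per status bit */

static void register_handler(int bit, bit_handler_t fn)
{
	handlers[bit] = fn;		/* the driver took a spinlock here */
}

static uint32_t dispatch(uint32_t stat, void *ctx)
{
	int i;

	for (i = 0; i < 32 && stat; i++) {
		if (!(stat & (1u << i)) || !handlers[i])
			continue;
		handlers[i](ctx);
		stat &= ~(1u << i);	/* mark the bit handled */
	}
	return stat;			/* whatever is left was unhandled */
}

static void example_intr(void *ctx)
{
	puts("example engine interrupt");
}

int main(void)
{
	register_handler(8, example_intr);	/* hypothetical status bit */
	printf("unhandled: 0x%08x\n", (unsigned)dispatch(1u << 8, NULL));
	return 0;
}
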
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.h b/drivers/gpu/drm/nouveau/nouveau_irq.h
new file mode 100644
index 000000000000..06714ad857bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.h
@@ -0,0 +1,11 @@
+#ifndef __NOUVEAU_IRQ_H__
+#define __NOUVEAU_IRQ_H__
+
+extern int         nouveau_irq_init(struct drm_device *);
+extern void        nouveau_irq_fini(struct drm_device *);
+extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void        nouveau_irq_preinstall(struct drm_device *);
+extern int         nouveau_irq_postinstall(struct drm_device *);
+extern void        nouveau_irq_uninstall(struct drm_device *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 7f0afad13653..7e0ff10a2759 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -30,446 +30,10 @@
  *    Roy Spliet <r.spliet@student.tudelft.nl>
  */
 
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_pm.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
-#include "nouveau_fifo.h"
-#include "nouveau_fence.h"
-
-/*
- * NV10-NV40 tiling helpers
- */
-
-static void
-nv10_mem_update_tile_region(struct drm_device *dev,
-			    struct nouveau_tile_reg *tile, uint32_t addr,
-			    uint32_t size, uint32_t pitch, uint32_t flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	int i = tile - dev_priv->tile.reg, j;
-	unsigned long save;
-
-	nouveau_fence_unref(&tile->fence);
-
-	if (tile->pitch)
-		pfb->free_tile_region(dev, i);
-
-	if (pitch)
-		pfb->init_tile_region(dev, i, addr, size, pitch, flags);
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-	nv04_fifo_cache_pull(dev, false);
-
-	nouveau_wait_for_idle(dev);
-
-	pfb->set_tile_region(dev, i);
-	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
-		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
-			dev_priv->eng[j]->set_tile_region(dev, i);
-	}
-
-	nv04_fifo_cache_pull(dev, true);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
-}
-
-static struct nouveau_tile_reg *
-nv10_mem_get_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	spin_lock(&dev_priv->tile.lock);
-
-	if (!tile->used &&
-	    (!tile->fence || nouveau_fence_done(tile->fence)))
-		tile->used = true;
-	else
-		tile = NULL;
-
-	spin_unlock(&dev_priv->tile.lock);
-	return tile;
-}
-
-void
-nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
-			 struct nouveau_fence *fence)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (tile) {
-		spin_lock(&dev_priv->tile.lock);
-		if (fence) {
-			/* Mark it as pending. */
-			tile->fence = fence;
-			nouveau_fence_ref(fence);
-		}
-
-		tile->used = false;
-		spin_unlock(&dev_priv->tile.lock);
-	}
-}
-
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-		    uint32_t pitch, uint32_t flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *tile, *found = NULL;
-	int i;
-
-	for (i = 0; i < pfb->num_tiles; i++) {
-		tile = nv10_mem_get_tile_region(dev, i);
-
-		if (pitch && !found) {
-			found = tile;
-			continue;
-
-		} else if (tile && tile->pitch) {
-			/* Kill an unused tile region. */
-			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
-		}
-
-		nv10_mem_put_tile_region(dev, tile, NULL);
-	}
-
-	if (found)
-		nv10_mem_update_tile_region(dev, found, addr, size,
-					    pitch, flags);
-	return found;
-}
-
-/*
- * Cleanup everything
- */
-void
-nouveau_mem_vram_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	ttm_bo_device_release(&dev_priv->ttm.bdev);
-
-	nouveau_ttm_global_release(dev_priv);
-
-	if (dev_priv->fb_mtrr >= 0) {
-		drm_mtrr_del(dev_priv->fb_mtrr,
-			     pci_resource_start(dev->pdev, 1),
-			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
-		dev_priv->fb_mtrr = -1;
-	}
-}
-
-void
-nouveau_mem_gart_fini(struct drm_device *dev)
-{
-	nouveau_sgdma_takedown(dev);
-
-	if (drm_core_has_AGP(dev) && dev->agp) {
-		struct drm_agp_mem *entry, *tempe;
-
-		/* Remove AGP resources, but leave dev->agp
-		   intact until drv_cleanup is called. */
-		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
-			if (entry->bound)
-				drm_unbind_agp(entry->memory);
-			drm_free_agp(entry->memory, entry->pages);
-			kfree(entry);
-		}
-		INIT_LIST_HEAD(&dev->agp->memory);
 
-		if (dev->agp->acquired)
-			drm_agp_release(dev);
-
-		dev->agp->acquired = 0;
-		dev->agp->enabled = 0;
-	}
-}
-
-bool
-nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
-{
-	if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
-		return true;
-
-	return false;
-}
-
-#if __OS_HAS_AGP
-static unsigned long
-get_agp_mode(struct drm_device *dev, unsigned long mode)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/*
-	 * FW seems to be broken on nv18, it makes the card lock up
-	 * randomly.
-	 */
-	if (dev_priv->chipset == 0x18)
-		mode &= ~PCI_AGP_COMMAND_FW;
-
-	/*
-	 * AGP mode set in the command line.
-	 */
-	if (nouveau_agpmode > 0) {
-		bool agpv3 = mode & 0x8;
-		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
-
-		mode = (mode & ~0x7) | (rate & 0x7);
-	}
-
-	return mode;
-}
-#endif
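
The fix-up removed above folds the agpmode= parameter into the low rate bits of the AGP mode word: AGP 3.0 encodes 4x/8x as 1/2 while AGP 2.0 uses 1/2/4 directly, hence the divide-by-four on v3 bridges. A standalone sketch of that translation; the mode words fed in below are invented, and this is not the DRM AGP API.

#include <stdio.h>

/* Illustrative only: mirrors the rate handling in the removed helper. */
static unsigned long apply_agpmode(unsigned long mode, int agpmode)
{
	if (agpmode > 0) {
		int agpv3 = mode & 0x8;		/* AGP 3.0 capable bridge */
		int rate = agpv3 ? agpmode / 4 : agpmode;

		mode = (mode & ~0x7UL) | (rate & 0x7);
	}
	return mode;
}

int main(void)
{
	/* agpmode=8 on a v3 bridge becomes rate code 2 (8x) */
	printf("v3: %#lx\n", apply_agpmode(0x00000a0bUL, 8));
	/* agpmode=4 on a v2 bridge keeps rate code 4 (4x)   */
	printf("v2: %#lx\n", apply_agpmode(0x00000204UL, 4));
	return 0;
}
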
-
-int
-nouveau_mem_reset_agp(struct drm_device *dev)
-{
-#if __OS_HAS_AGP
-	uint32_t saved_pci_nv_1, pmc_enable;
-	int ret;
-
-	/* First of all, disable fast writes, otherwise if it's
-	 * already enabled in the AGP bridge and we disable the card's
-	 * AGP controller we might be locking ourselves out of it. */
-	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
-	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
-		struct drm_agp_info info;
-		struct drm_agp_mode mode;
-
-		ret = drm_agp_info(dev, &info);
-		if (ret)
-			return ret;
-
-		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
-		ret = drm_agp_enable(dev, mode);
-		if (ret)
-			return ret;
-	}
-
-	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
-
-	/* clear busmaster bit */
-	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
-	/* disable AGP */
-	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);
-
-	/* power cycle pgraph, if enabled */
-	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
-	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
-		nv_wr32(dev, NV03_PMC_ENABLE,
-				pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
-		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
-				NV_PMC_ENABLE_PGRAPH);
-	}
-
-	/* and restore (gives effect of resetting AGP) */
-	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
-#endif
-
-	return 0;
-}
-
-int
-nouveau_mem_init_agp(struct drm_device *dev)
-{
-#if __OS_HAS_AGP
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_agp_info info;
-	struct drm_agp_mode mode;
-	int ret;
-
-	if (!dev->agp->acquired) {
-		ret = drm_agp_acquire(dev);
-		if (ret) {
-			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
-			return ret;
-		}
-	}
-
-	nouveau_mem_reset_agp(dev);
-
-	ret = drm_agp_info(dev, &info);
-	if (ret) {
-		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
-		return ret;
-	}
-
-	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = get_agp_mode(dev, info.mode);
-	ret = drm_agp_enable(dev, mode);
-	if (ret) {
-		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
-		return ret;
-	}
-
-	dev_priv->gart_info.type	= NOUVEAU_GART_AGP;
-	dev_priv->gart_info.aper_base	= info.aperture_base;
-	dev_priv->gart_info.aper_size	= info.aperture_size;
-#endif
-	return 0;
-}
-
-static const struct vram_types {
-	int value;
-	const char *name;
-} vram_type_map[] = {
-	{ NV_MEM_TYPE_STOLEN , "stolen system memory" },
-	{ NV_MEM_TYPE_SGRAM  , "SGRAM" },
-	{ NV_MEM_TYPE_SDRAM  , "SDRAM" },
-	{ NV_MEM_TYPE_DDR1   , "DDR1" },
-	{ NV_MEM_TYPE_DDR2   , "DDR2" },
-	{ NV_MEM_TYPE_DDR3   , "DDR3" },
-	{ NV_MEM_TYPE_GDDR2  , "GDDR2" },
-	{ NV_MEM_TYPE_GDDR3  , "GDDR3" },
-	{ NV_MEM_TYPE_GDDR4  , "GDDR4" },
-	{ NV_MEM_TYPE_GDDR5  , "GDDR5" },
-	{ NV_MEM_TYPE_UNKNOWN, "unknown type" }
-};
-
-int
-nouveau_mem_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-	const struct vram_types *vram_type;
-	int ret, dma_bits;
-
-	dma_bits = 32;
-	if (dev_priv->card_type >= NV_50) {
-		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
-			dma_bits = 40;
-	} else
-	if (0 && pci_is_pcie(dev->pdev) &&
-	    dev_priv->chipset  > 0x40 &&
-	    dev_priv->chipset != 0x45) {
-		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
-			dma_bits = 39;
-	}
-
-	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
-	if (ret)
-		return ret;
-	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
-	if (ret) {
-		/* Reset to default value. */
-		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
-	}
-
-
-	ret = nouveau_ttm_global_init(dev_priv);
-	if (ret)
-		return ret;
-
-	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
-				 dev_priv->ttm.bo_global_ref.ref.object,
-				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
-				 dma_bits <= 32 ? true : false);
-	if (ret) {
-		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
-		return ret;
-	}
-
-	vram_type = vram_type_map;
-	while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
-		if (nouveau_vram_type) {
-			if (!strcasecmp(nouveau_vram_type, vram_type->name))
-				break;
-			dev_priv->vram_type = vram_type->value;
-		} else {
-			if (vram_type->value == dev_priv->vram_type)
-				break;
-		}
-		vram_type++;
-	}
-
-	NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
-		(int)(dev_priv->vram_size >> 20), vram_type->name);
-	if (dev_priv->vram_sys_base) {
-		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
-			dev_priv->vram_sys_base);
-	}
-
-	dev_priv->fb_available_size = dev_priv->vram_size;
-	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
-	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
-		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
-	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
-	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
-	dev_priv->fb_aper_free = dev_priv->fb_available_size;
-
-	/* mappable vram */
-	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
-			     dev_priv->fb_available_size >> PAGE_SHIFT);
-	if (ret) {
-		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
-		return ret;
-	}
-
-	if (dev_priv->card_type < NV_50) {
-		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
-				     0, 0, NULL, &dev_priv->vga_ram);
-		if (ret == 0)
-			ret = nouveau_bo_pin(dev_priv->vga_ram,
-					     TTM_PL_FLAG_VRAM);
-
-		if (ret) {
-			NV_WARN(dev, "failed to reserve VGA memory\n");
-			nouveau_bo_ref(NULL, &dev_priv->vga_ram);
-		}
-	}
-
-	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
-					 pci_resource_len(dev->pdev, 1),
-					 DRM_MTRR_WC);
-	return 0;
-}
-
-int
-nouveau_mem_gart_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-	int ret;
-
-	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
-
-#if !defined(__powerpc__) && !defined(__ia64__)
-	if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
-		ret = nouveau_mem_init_agp(dev);
-		if (ret)
-			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
-	}
-#endif
-
-	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
-		ret = nouveau_sgdma_init(dev);
-		if (ret) {
-			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
-			return ret;
-		}
-	}
-
-	NV_INFO(dev, "%d MiB GART (aperture)\n",
-		(int)(dev_priv->gart_info.aper_size >> 20));
-	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
-
-	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
-			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
-	if (ret) {
-		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
+#include <subdev/fb.h>
 
 static int
 nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
@@ -477,6 +41,8 @@ nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
 		     struct nouveau_pm_memtiming *boot,
 		     struct nouveau_pm_memtiming *t)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
 
 	/* XXX: I don't trust the -1's and +1's... they must come
@@ -492,7 +58,7 @@ nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
 		     e->tRCDWR << 8 |
 		     e->tRCDRD);
 
-	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
+	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
 		 t->reg[0], t->reg[1], t->reg[2]);
 	return 0;
 }
@@ -503,7 +69,9 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
 		     struct nouveau_pm_memtiming *boot,
 		     struct nouveau_pm_memtiming *t)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct bit_entry P;
 	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
 
@@ -557,7 +125,7 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
 		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
 
 		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
-		if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
+		if (pfb->ram.type == NV_MEM_TYPE_DDR2) {
 			t->reg[5] |= (e->tCL + 3) << 8;
 			t->reg[6] |= (t->tCWL - 2) << 8;
 			t->reg[8] |= (e->tCL - 4);
@@ -590,11 +158,11 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
 			    0x202;
 	}
 
-	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
+	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
 		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
-	NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
+	NV_DEBUG(drm, "         230: %08x %08x %08x %08x\n",
 		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
-	NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
+	NV_DEBUG(drm, "         240: %08x\n", t->reg[8]);
 	return 0;
 }
 
@@ -604,6 +172,8 @@ nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
 		     struct nouveau_pm_memtiming *boot,
 		     struct nouveau_pm_memtiming *t)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	if (e->tCWL > 0)
 		t->tCWL = e->tCWL;
 
@@ -626,9 +196,9 @@ nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
 	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
 		    (e->tRRD&0x1f) << 15;
 
-	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
+	NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
 		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
-	NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
+	NV_DEBUG(drm, "         2a0: %08x\n", t->reg[4]);
 	return 0;
 }
 
@@ -642,6 +212,8 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
 		    struct nouveau_pm_memtiming *boot,
 		    struct nouveau_pm_memtiming *t)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	t->drive_strength = 0;
 	if (len < 15) {
 		t->odt = boot->odt;
@@ -650,17 +222,17 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
 	}
 
 	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
-		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
 		return -ERANGE;
 	}
 
 	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
-		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
 		return -ERANGE;
 	}
 
 	if (t->odt > 3) {
-		NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
+		NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
 			t->id, t->odt);
 		t->odt = 0;
 	}
@@ -672,11 +244,11 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
 		   (t->odt & 0x1) << 2 |
 		   (t->odt & 0x2) << 5;
 
-	NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
+	NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
 	return 0;
 }
 
-uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
+static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
 	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
 
 static int
@@ -685,6 +257,7 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
 		    struct nouveau_pm_memtiming *boot,
 		    struct nouveau_pm_memtiming *t)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 cl = e->tCL - 4;
 
 	t->drive_strength = 0;
@@ -695,17 +268,17 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
 	}
 
 	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
-		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
 		return -ERANGE;
 	}
 
 	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
-		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
 		return -ERANGE;
 	}
 
 	if (e->tCWL < 5) {
-		NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
+		NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
 		return -ERANGE;
 	}
 
@@ -720,13 +293,13 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
 		   (t->odt & 0x4) << 7;
 	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
 
-	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
+	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
 	return 0;
 }
 
-uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
+static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
 	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
-uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
+static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
 	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
 
 static int
@@ -735,6 +308,8 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
 		     struct nouveau_pm_memtiming *boot,
 		     struct nouveau_pm_memtiming *t)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	if (len < 15) {
 		t->drive_strength = boot->drive_strength;
 		t->odt = boot->odt;
@@ -744,17 +319,17 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
 	}
 
 	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
-		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
 		return -ERANGE;
 	}
 
 	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
-		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
 		return -ERANGE;
 	}
 
 	if (t->odt > 3) {
-		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
+		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
 			t->id, t->odt);
 		t->odt = 0;
 	}
@@ -768,7 +343,7 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
 		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
 	t->mr[2] = boot->mr[2];
 
-	NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
+	NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
 		      t->mr[0], t->mr[1], t->mr[2]);
 	return 0;
 }
@@ -779,6 +354,8 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
 		     struct nouveau_pm_memtiming *boot,
 		     struct nouveau_pm_memtiming *t)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	if (len < 15) {
 		t->drive_strength = boot->drive_strength;
 		t->odt = boot->odt;
@@ -788,17 +365,17 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
 	}
 
 	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
-		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
 		return -ERANGE;
 	}
 
 	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
-		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
 		return -ERANGE;
 	}
 
 	if (t->odt > 3) {
-		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
+		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
 			t->id, t->odt);
 		t->odt = 0;
 	}
@@ -810,7 +387,7 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
 		   t->drive_strength |
 		   (t->odt << 2);
 
-	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
+	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
 	return 0;
 }
 
@@ -818,8 +395,9 @@ int
 nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
 			struct nouveau_pm_memtiming *t)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
 	struct nouveau_pm_tbl_entry *e;
 	u8 ver, len, *ptr, *ramcfg;
@@ -834,7 +412,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
 
 	t->tCWL = boot->tCWL;
 
-	switch (dev_priv->card_type) {
+	switch (device->card_type) {
 	case NV_40:
 		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
 		break;
@@ -850,7 +428,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
 		break;
 	}
 
-	switch (dev_priv->vram_type * !ret) {
+	switch (pfb->ram.type * !ret) {
 	case NV_MEM_TYPE_GDDR3:
 		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
 		break;
@@ -877,7 +455,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
 		else
 			dll_off = !!(ramcfg[2] & 0x40);
 
-		switch (dev_priv->vram_type) {
+		switch (pfb->ram.type) {
 		case NV_MEM_TYPE_GDDR3:
 			t->mr[1] &= ~0x00000040;
 			t->mr[1] |=  0x00000040 * dll_off;
@@ -895,11 +473,12 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
 void
 nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	u32 timing_base, timing_regs, mr_base;
 	int i;
 
-	if (dev_priv->card_type >= 0xC0) {
+	if (device->card_type >= 0xC0) {
 		timing_base = 0x10f290;
 		mr_base = 0x10f300;
 	} else {
@@ -909,7 +488,7 @@ nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
 
 	t->id = -1;
 
-	switch (dev_priv->card_type) {
+	switch (device->card_type) {
 	case NV_50:
 		timing_regs = 9;
 		break;
@@ -926,24 +505,24 @@ nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
 		return;
 	}
 	for(i = 0; i < timing_regs; i++)
-		t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));
+		t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
 
 	t->tCWL = 0;
-	if (dev_priv->card_type < NV_C0) {
-		t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
-	} else if (dev_priv->card_type <= NV_D0) {
-		t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
+	if (device->card_type < NV_C0) {
+		t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
+	} else if (device->card_type <= NV_D0) {
+		t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
 	}
 
-	t->mr[0] = nv_rd32(dev, mr_base);
-	t->mr[1] = nv_rd32(dev, mr_base + 0x04);
-	t->mr[2] = nv_rd32(dev, mr_base + 0x20);
-	t->mr[3] = nv_rd32(dev, mr_base + 0x24);
+	t->mr[0] = nv_rd32(device, mr_base);
+	t->mr[1] = nv_rd32(device, mr_base + 0x04);
+	t->mr[2] = nv_rd32(device, mr_base + 0x20);
+	t->mr[3] = nv_rd32(device, mr_base + 0x24);
 
 	t->odt = 0;
 	t->drive_strength = 0;
 
-	switch (dev_priv->vram_type) {
+	switch (pfb->ram.type) {
 	case NV_MEM_TYPE_DDR3:
 		t->odt |= (t->mr[1] & 0x200) >> 7;
 	case NV_MEM_TYPE_DDR2:
@@ -964,13 +543,15 @@ int
 nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
 		 struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(exec->dev);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	struct nouveau_pm_memtiming *info = &perflvl->timing;
 	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
 	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
 	u32 mr1_dlloff;
 
-	switch (dev_priv->vram_type) {
+	switch (pfb->ram.type) {
 	case NV_MEM_TYPE_DDR2:
 		tDLLK = 2000;
 		mr1_dlloff = 0x00000001;
@@ -986,12 +567,12 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
 		mr1_dlloff = 0x00000040;
 		break;
 	default:
-		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
+		NV_ERROR(drm, "cannot reclock unsupported memtype\n");
 		return -ENODEV;
 	}
 
 	/* fetch current MRs */
-	switch (dev_priv->vram_type) {
+	switch (pfb->ram.type) {
 	case NV_MEM_TYPE_GDDR3:
 	case NV_MEM_TYPE_DDR3:
 		mr[2] = exec->mrg(exec, 2);
@@ -1058,194 +639,9 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
 		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
 		exec->wait(exec, tMRD);
 		exec->wait(exec, tDLLK);
-		if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
+		if (pfb->ram.type == NV_MEM_TYPE_GDDR3)
 			exec->precharge(exec);
 	}
 
 	return 0;
 }
-
-int
-nouveau_mem_vbios_type(struct drm_device *dev)
-{
-	struct bit_entry M;
-	u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
-	if (!bit_table(dev, 'M', &M) || M.version != 2 || M.length < 5) {
-		u8 *table = ROMPTR(dev, M.data[3]);
-		if (table && table[0] == 0x10 && ramcfg < table[3]) {
-			u8 *entry = table + table[1] + (ramcfg * table[2]);
-			switch (entry[0] & 0x0f) {
-			case 0: return NV_MEM_TYPE_DDR2;
-			case 1: return NV_MEM_TYPE_DDR3;
-			case 2: return NV_MEM_TYPE_GDDR3;
-			case 3: return NV_MEM_TYPE_GDDR5;
-			default:
-				break;
-			}
-
-		}
-	}
-	return NV_MEM_TYPE_UNKNOWN;
-}
-
-static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-	/* nothing to do */
-	return 0;
-}
-
-static int
-nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
-{
-	/* nothing to do */
-	return 0;
-}
-
-static inline void
-nouveau_mem_node_cleanup(struct nouveau_mem *node)
-{
-	if (node->vma[0].node) {
-		nouveau_vm_unmap(&node->vma[0]);
-		nouveau_vm_put(&node->vma[0]);
-	}
-
-	if (node->vma[1].node) {
-		nouveau_vm_unmap(&node->vma[1]);
-		nouveau_vm_put(&node->vma[1]);
-	}
-}
-
-static void
-nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem)
-{
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	struct drm_device *dev = dev_priv->dev;
-
-	nouveau_mem_node_cleanup(mem->mm_node);
-	vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
-}
-
-static int
-nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
-			 struct ttm_buffer_object *bo,
-			 struct ttm_placement *placement,
-			 struct ttm_mem_reg *mem)
-{
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	struct drm_device *dev = dev_priv->dev;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_mem *node;
-	u32 size_nc = 0;
-	int ret;
-
-	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-		size_nc = 1 << nvbo->page_shift;
-
-	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
-			mem->page_alignment << PAGE_SHIFT, size_nc,
-			(nvbo->tile_flags >> 8) & 0x3ff, &node);
-	if (ret) {
-		mem->mm_node = NULL;
-		return (ret == -ENOSPC) ? 0 : ret;
-	}
-
-	node->page_shift = nvbo->page_shift;
-
-	mem->mm_node = node;
-	mem->start   = node->offset >> PAGE_SHIFT;
-	return 0;
-}
-
-void
-nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-	struct nouveau_mm *mm = man->priv;
-	struct nouveau_mm_node *r;
-	u32 total = 0, free = 0;
-
-	mutex_lock(&mm->mutex);
-	list_for_each_entry(r, &mm->nodes, nl_entry) {
-		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
-		       prefix, r->type, ((u64)r->offset << 12),
-		       (((u64)r->offset + r->length) << 12));
-
-		total += r->length;
-		if (!r->type)
-			free += r->length;
-	}
-	mutex_unlock(&mm->mutex);
-
-	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
-	       prefix, (u64)total << 12, (u64)free << 12);
-	printk(KERN_DEBUG "%s  block: 0x%08x\n",
-	       prefix, mm->block_size << 12);
-}
-
-const struct ttm_mem_type_manager_func nouveau_vram_manager = {
-	nouveau_vram_manager_init,
-	nouveau_vram_manager_fini,
-	nouveau_vram_manager_new,
-	nouveau_vram_manager_del,
-	nouveau_vram_manager_debug
-};
-
-static int
-nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-	return 0;
-}
-
-static int
-nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
-	return 0;
-}
-
-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem)
-{
-	nouveau_mem_node_cleanup(mem->mm_node);
-	kfree(mem->mm_node);
-	mem->mm_node = NULL;
-}
-
-static int
-nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
-			 struct ttm_buffer_object *bo,
-			 struct ttm_placement *placement,
-			 struct ttm_mem_reg *mem)
-{
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct nouveau_mem *node;
-
-	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
-		     dev_priv->gart_info.aper_size))
-		return -ENOMEM;
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
-	node->page_shift = 12;
-
-	mem->mm_node = node;
-	mem->start   = 0;
-	return 0;
-}
-
-void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-}
-
-const struct ttm_mem_type_manager_func nouveau_gart_manager = {
-	nouveau_gart_manager_init,
-	nouveau_gart_manager_fini,
-	nouveau_gart_manager_new,
-	nouveau_gart_manager_del,
-	nouveau_gart_manager_debug
-};
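
The nouveau_mem_*_mr() helpers earlier in this file's diff all follow the same pattern: range-check the raw tCL/tWR values from the VBIOS timing entry against a per-technology lookup table, then merge the encoded fields into the boot-time mode-register words, warning and returning -ERANGE when a value falls outside the table. Below is a stand-alone sketch of that pattern; the table contents, field positions and function names are illustrative only and do not match any particular DRAM generation.

#include <stdint.h>
#include <stdio.h>

#define WR_LUT_MAX 12

/* Illustrative write-recovery encoding table; the real tables
 * (nv_mem_wr_lut_ddr3 and friends) differ. */
static const uint8_t wr_lut[WR_LUT_MAX] = { 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6 };

/* Range-check the raw timing values, translate tWR through the LUT, and
 * merge the encoded fields into the boot-time mode-register image while
 * preserving the untouched bits.  Field positions are made up for the
 * example. */
static int pack_mr0(uint32_t boot_mr0, unsigned tCL, unsigned tWR, uint32_t *out)
{
	if (tWR >= WR_LUT_MAX || tCL > 15) {
		fprintf(stderr, "invalid tCL/tWR: %u/%u\n", tCL, tWR);
		return -1;
	}

	*out = (boot_mr0 & ~0x00000ef0u) |	/* keep reserved/untouched bits */
	       ((tCL & 0xf) << 4) |		/* CAS latency field */
	       ((uint32_t)wr_lut[tWR] << 9);	/* encoded write recovery */
	return 0;
}

int main(void)
{
	uint32_t mr0;
	if (pack_mr0(0x00000832, 6, 8, &mr0) == 0)
		printf("MR0: 0x%08x\n", mr0);
	return 0;
}
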
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
deleted file mode 100644
index 57a600c35c95..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_REGION_H__
-#define __NOUVEAU_REGION_H__
-
-struct nouveau_mm_node {
-	struct list_head nl_entry;
-	struct list_head fl_entry;
-	struct list_head rl_entry;
-
-	u8  type;
-	u32 offset;
-	u32 length;
-};
-
-struct nouveau_mm {
-	struct list_head nodes;
-	struct list_head free;
-
-	struct mutex mutex;
-
-	u32 block_size;
-	int heap_nodes;
-};
-
-int  nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
-int  nouveau_mm_fini(struct nouveau_mm *);
-int  nouveau_mm_pre(struct nouveau_mm *);
-int  nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
-		    u32 align, struct nouveau_mm_node **);
-void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
-
-int  nv50_vram_init(struct drm_device *);
-void nv50_vram_fini(struct drm_device *);
-int  nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
-		    u32 memtype, struct nouveau_mem **);
-void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
-bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
-
-int  nvc0_vram_init(struct drm_device *);
-int  nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
-		    u32 memtype, struct nouveau_mem **);
-bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
-
-#endif
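
For reference, a hedged usage sketch of the range allocator this header declared, based purely on the deleted prototypes above. The units of offset/length/block, the meaning of the type argument, and the exact error semantics are assumptions drawn from the deleted VRAM manager code, which accounted in 4 KiB blocks; this is not runnable outside the old driver.

/* Sketch only: exercises the deleted nouveau_mm_* interface. */
static int example_mm_usage(struct nouveau_mm *mm)
{
	struct nouveau_mm_node *node = NULL;
	int ret;

	/* manage 0x10000 blocks starting at block 0, block size 1 (assumed units) */
	ret = nouveau_mm_init(mm, 0, 0x10000, 1);
	if (ret)
		return ret;

	/* request 0x100 blocks of "type" 1, no non-contiguous minimum,
	 * aligned to a single block (argument meanings assumed) */
	ret = nouveau_mm_get(mm, 1, 0x100, 0, 1, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);

	nouveau_mm_fini(mm);
	return ret;
}
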
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
deleted file mode 100644
index d07f4a3310b9..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mxm.c
+++ /dev/null
@@ -1,723 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/acpi.h>
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-
-#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
-#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
-
-static u8 *
-mxms_data(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	return dev_priv->mxms;
-
-}
-
-static u16
-mxms_version(struct drm_device *dev)
-{
-	u8 *mxms = mxms_data(dev);
-	u16 version = (mxms[4] << 8) | mxms[5];
-	switch (version ) {
-	case 0x0200:
-	case 0x0201:
-	case 0x0300:
-		return version;
-	default:
-		break;
-	}
-
-	MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
-	return 0x0000;
-}
-
-static u16
-mxms_headerlen(struct drm_device *dev)
-{
-	return 8;
-}
-
-static u16
-mxms_structlen(struct drm_device *dev)
-{
-	return *(u16 *)&mxms_data(dev)[6];
-}
-
-static bool
-mxms_checksum(struct drm_device *dev)
-{
-	u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
-	u8 *mxms = mxms_data(dev), sum = 0;
-	while (size--)
-		sum += *mxms++;
-	if (sum) {
-		MXM_DBG(dev, "checksum invalid\n");
-		return false;
-	}
-	return true;
-}
-
-static bool
-mxms_valid(struct drm_device *dev)
-{
-	u8 *mxms = mxms_data(dev);
-	if (*(u32 *)mxms != 0x5f4d584d) {
-		MXM_DBG(dev, "signature invalid\n");
-		return false;
-	}
-
-	if (!mxms_version(dev) || !mxms_checksum(dev))
-		return false;
-
-	return true;
-}
-
-static bool
-mxms_foreach(struct drm_device *dev, u8 types,
-	     bool (*exec)(struct drm_device *, u8 *, void *), void *info)
-{
-	u8 *mxms = mxms_data(dev);
-	u8 *desc = mxms + mxms_headerlen(dev);
-	u8 *fini = desc + mxms_structlen(dev) - 1;
-	while (desc < fini) {
-		u8 type = desc[0] & 0x0f;
-		u8 headerlen = 0;
-		u8 recordlen = 0;
-		u8 entries = 0;
-
-		switch (type) {
-		case 0: /* Output Device Structure */
-			if (mxms_version(dev) >= 0x0300)
-				headerlen = 8;
-			else
-				headerlen = 6;
-			break;
-		case 1: /* System Cooling Capability Structure */
-		case 2: /* Thermal Structure */
-		case 3: /* Input Power Structure */
-			headerlen = 4;
-			break;
-		case 4: /* GPIO Device Structure */
-			headerlen = 4;
-			recordlen = 2;
-			entries   = (ROM32(desc[0]) & 0x01f00000) >> 20;
-			break;
-		case 5: /* Vendor Specific Structure */
-			headerlen = 8;
-			break;
-		case 6: /* Backlight Control Structure */
-			if (mxms_version(dev) >= 0x0300) {
-				headerlen = 4;
-				recordlen = 8;
-				entries   = (desc[1] & 0xf0) >> 4;
-			} else {
-				headerlen = 8;
-			}
-			break;
-		case 7: /* Fan Control Structure */
-			headerlen = 8;
-			recordlen = 4;
-			entries   = desc[1] & 0x07;
-			break;
-		default:
-			MXM_DBG(dev, "unknown descriptor type %d\n", type);
-			return false;
-		}
-
-		if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
-			static const char * mxms_desc_name[] = {
-				"ODS", "SCCS", "TS", "IPS",
-				"GSD", "VSS", "BCS", "FCS",
-			};
-			u8 *dump = desc;
-			int i, j;
-
-			MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
-			for (j = headerlen - 1; j >= 0; j--)
-				printk("%02x", dump[j]);
-			printk("\n");
-			dump += headerlen;
-
-			for (i = 0; i < entries; i++, dump += recordlen) {
-				MXM_DBG(dev, "      ");
-				for (j = recordlen - 1; j >= 0; j--)
-					printk("%02x", dump[j]);
-				printk("\n");
-			}
-		}
-
-		if (types & (1 << type)) {
-			if (!exec(dev, desc, info))
-				return false;
-		}
-
-		desc += headerlen + (entries * recordlen);
-	}
-
-	return true;
-}
-
-static u8 *
-mxm_table(struct drm_device *dev, u8 *size)
-{
-	struct bit_entry x;
-
-	if (bit_table(dev, 'x', &x)) {
-		MXM_DBG(dev, "BIT 'x' table not present\n");
-		return NULL;
-	}
-
-	if (x.version != 1 || x.length < 3) {
-		MXM_MSG(dev, "BIT x table %d/%d unknown\n",
-			x.version, x.length);
-		return NULL;
-	}
-
-	*size = x.length;
-	return x.data;
-}
-
-/* These map MXM v2.x digital connection values to the appropriate SOR/link,
- * hopefully they're correct for all boards within the same chipset...
- *
- * MXM v3.x VBIOS are nicer and provide pointers to these tables.
- */
-static u8 nv84_sor_map[16] = {
-	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8 nv92_sor_map[16] = {
-	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
-	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8 nv94_sor_map[16] = {
-	0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
-	0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8 nv96_sor_map[16] = {
-	0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
-	0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8 nv98_sor_map[16] = {
-	0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
-	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static u8
-mxm_sor_map(struct drm_device *dev, u8 conn)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u8 len, *mxm = mxm_table(dev, &len);
-	if (mxm && len >= 6) {
-		u8 *map = ROMPTR(dev, mxm[4]);
-		if (map) {
-			if (map[0] == 0x10) {
-				if (conn < map[3])
-					return map[map[1] + conn];
-				return 0x00;
-			}
-
-			MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
-		}
-	}
-
-	if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
-		return nv84_sor_map[conn];
-	if (dev_priv->chipset == 0x92)
-		return nv92_sor_map[conn];
-	if (dev_priv->chipset == 0x94)
-		return nv94_sor_map[conn];
-	if (dev_priv->chipset == 0x96)
-		return nv96_sor_map[conn];
-	if (dev_priv->chipset == 0x98)
-		return nv98_sor_map[conn];
-
-	MXM_MSG(dev, "missing sor map\n");
-	return 0x00;
-}
-
-static u8
-mxm_ddc_map(struct drm_device *dev, u8 port)
-{
-	u8 len, *mxm = mxm_table(dev, &len);
-	if (mxm && len >= 8) {
-		u8 *map = ROMPTR(dev, mxm[6]);
-		if (map) {
-			if (map[0] == 0x10) {
-				if (port < map[3])
-					return map[map[1] + port];
-				return 0x00;
-			}
-
-			MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
-		}
-	}
-
-	/* v2.x: directly write port as dcb i2cidx */
-	return (port << 4) | port;
-}
-
-struct mxms_odev {
-	u8 outp_type;
-	u8 conn_type;
-	u8 ddc_port;
-	u8 dig_conn;
-};
-
-static void
-mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
-{
-	u64 data = ROM32(pdata[0]);
-	if (mxms_version(dev) >= 0x0300)
-		data |= (u64)ROM16(pdata[4]) << 32;
-
-	desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
-	desc->ddc_port  = (data & 0x0000000000000f00ULL) >> 8;
-	desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
-	desc->dig_conn  = (data & 0x0000000000780000ULL) >> 19;
-}
-
-struct context {
-	u32 *outp;
-	struct mxms_odev desc;
-};
-
-static bool
-mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
-{
-	struct context *ctx = info;
-	struct mxms_odev desc;
-
-	mxms_output_device(dev, data, &desc);
-	if (desc.outp_type == 2 &&
-	    desc.dig_conn == ctx->desc.dig_conn)
-		return false;
-	return true;
-}
-
-static bool
-mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
-{
-	struct context *ctx = info;
-	u64 desc = *(u64 *)data;
-
-	mxms_output_device(dev, data, &ctx->desc);
-
-	/* match dcb encoder type to mxm-ods device type */
-	if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
-		return true;
-
-	/* digital output, have some extra stuff to match here, there's a
-	 * table in the vbios that provides a mapping from the mxm digital
-	 * connection enum values to SOR/link
-	 */
-	if ((desc & 0x00000000000000f0) >= 0x20) {
-		/* check against sor index */
-		u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
-		if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
-			return true;
-
-		/* check dcb entry has a compatible link field */
-		link = (link & 0x30) >> 4;
-		if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
-			return true;
-	}
-
-	/* mark this descriptor accounted for by setting invalid device type,
-	 * except of course some manufacturers don't follow specs properly and
-	 * we need to avoid killing off the TMDS function on DP connectors
-	 * if MXM-SIS is missing an entry for it.
-	 */
-	data[0] &= ~0xf0;
-	if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
-	    mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
-		data[0] |= 0x20; /* modify descriptor to match TMDS now */
-	} else {
-		data[0] |= 0xf0;
-	}
-
-	return false;
-}
-
-static int
-mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
-{
-	struct context ctx = { .outp = (u32 *)dcbe };
-	u8 type, i2cidx, link;
-	u8 *conn;
-
-	/* look for an output device structure that matches this dcb entry.
-	 * if one isn't found, disable it.
-	 */
-	if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
-		MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
-			idx, ctx.outp[0], ctx.outp[1]);
-		ctx.outp[0] |= 0x0000000f;
-		return 0;
-	}
-
-	/* modify the output's ddc/aux port, there's a pointer to a table
-	 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
-	 * vbios mxm table
-	 */
-	i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
-	if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
-		i2cidx = (i2cidx & 0x0f) << 4;
-	else
-		i2cidx = (i2cidx & 0xf0);
-
-	if (i2cidx != 0xf0) {
-		ctx.outp[0] &= ~0x000000f0;
-		ctx.outp[0] |= i2cidx;
-	}
-
-	/* override dcb sorconf.link, based on what mxm data says */
-	switch (ctx.desc.outp_type) {
-	case 0x00: /* Analog CRT */
-	case 0x01: /* Analog TV/HDTV */
-		break;
-	default:
-		link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
-		ctx.outp[1] &= ~0x00000030;
-		ctx.outp[1] |= link;
-		break;
-	}
-
-	/* we may need to fixup various other vbios tables based on what
-	 * the descriptor says the connector type should be.
-	 *
-	 * in a lot of cases, the vbios tables will claim DVI-I is possible,
-	 * and the mxm data says the connector is really HDMI.  another
-	 * common example is DP->eDP.
-	 */
-	conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
-	type = conn[0];
-	switch (ctx.desc.conn_type) {
-	case 0x01: /* LVDS */
-		ctx.outp[1] |= 0x00000004; /* use_power_scripts */
-		/* XXX: modify default link width in LVDS table */
-		break;
-	case 0x02: /* HDMI */
-		type = DCB_CONNECTOR_HDMI_1;
-		break;
-	case 0x03: /* DVI-D */
-		type = DCB_CONNECTOR_DVI_D;
-		break;
-	case 0x0e: /* eDP, falls through to DPint */
-		ctx.outp[1] |= 0x00010000;
-	case 0x07: /* DP internal, wtf is this?? HP8670w */
-		ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
-		type = DCB_CONNECTOR_eDP;
-		break;
-	default:
-		break;
-	}
-
-	if (mxms_version(dev) >= 0x0300)
-		conn[0] = type;
-
-	return 0;
-}
-
-static bool
-mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
-{
-	u64 desc = *(u64 *)data;
-	if ((desc & 0xf0) != 0xf0)
-		MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
-	return true;
-}
-
-static void
-mxm_dcb_sanitise(struct drm_device *dev)
-{
-	u8 *dcb = dcb_table(dev);
-	if (!dcb || dcb[0] != 0x40) {
-		MXM_DBG(dev, "unsupported DCB version\n");
-		return;
-	}
-
-	dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
-	mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
-}
-
-static bool
-mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
-		     u8 offset, u8 size, u8 *data)
-{
-	struct i2c_msg msgs[] = {
-		{ .addr = addr, .flags = 0, .len = 1, .buf = &offset },
-		{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
-	};
-
-	return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
-}
-
-static bool
-mxm_shadow_rom(struct drm_device *dev, u8 version)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_i2c_chan *i2c = NULL;
-	u8 i2cidx, mxms[6], addr, size;
-
-	i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
-	if (i2cidx < 0x0f)
-		i2c = nouveau_i2c_find(dev, i2cidx);
-	if (!i2c)
-		return false;
-
-	addr = 0x54;
-	if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
-		addr = 0x56;
-		if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
-			return false;
-	}
-
-	dev_priv->mxms = mxms;
-	size = mxms_headerlen(dev) + mxms_structlen(dev);
-	dev_priv->mxms = kmalloc(size, GFP_KERNEL);
-
-	if (dev_priv->mxms &&
-	    mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
-		return true;
-
-	kfree(dev_priv->mxms);
-	dev_priv->mxms = NULL;
-	return false;
-}
-
-#if defined(CONFIG_ACPI)
-static bool
-mxm_shadow_dsm(struct drm_device *dev, u8 version)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	static char muid[] = {
-		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
-		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
-	};
-	u32 mxms_args[] = { 0x00000000 };
-	union acpi_object args[4] = {
-		/* _DSM MUID */
-		{ .buffer.type = 3,
-		  .buffer.length = sizeof(muid),
-		  .buffer.pointer = muid,
-		},
-		/* spec says this can be zero to mean "highest revision", but
-		 * of course there's at least one bios out there which fails
-		 * unless you pass in exactly the version it supports..
-		 */
-		{ .integer.type = ACPI_TYPE_INTEGER,
-		  .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
-		},
-		/* MXMS function */
-		{ .integer.type = ACPI_TYPE_INTEGER,
-		  .integer.value = 0x00000010,
-		},
-		/* Pointer to MXMS arguments */
-		{ .buffer.type = ACPI_TYPE_BUFFER,
-		  .buffer.length = sizeof(mxms_args),
-		  .buffer.pointer = (char *)mxms_args,
-		},
-	};
-	struct acpi_object_list list = { ARRAY_SIZE(args), args };
-	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-	acpi_handle handle;
-	int ret;
-
-	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
-	if (!handle)
-		return false;
-
-	ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
-	if (ret) {
-		MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
-		return false;
-	}
-
-	obj = retn.pointer;
-	if (obj->type == ACPI_TYPE_BUFFER) {
-		dev_priv->mxms = kmemdup(obj->buffer.pointer,
-					 obj->buffer.length, GFP_KERNEL);
-	} else
-	if (obj->type == ACPI_TYPE_INTEGER) {
-		MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
-	}
-
-	kfree(obj);
-	return dev_priv->mxms != NULL;
-}
-#endif
-
-#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
-
-#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
-
-static u8
-wmi_wmmx_mxmi(struct drm_device *dev, u8 version)
-{
-	u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
-	struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
-	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-	acpi_status status;
-
-	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
-	if (ACPI_FAILURE(status)) {
-		MXM_DBG(dev, "WMMX MXMI returned %d\n", status);
-		return 0x00;
-	}
-
-	obj = retn.pointer;
-	if (obj->type == ACPI_TYPE_INTEGER) {
-		version = obj->integer.value;
-		MXM_DBG(dev, "WMMX MXMI version %d.%d\n",
-			     (version >> 4), version & 0x0f);
-	} else {
-		version = 0;
-		MXM_DBG(dev, "WMMX MXMI returned non-integer\n");
-	}
-
-	kfree(obj);
-	return version;
-}
-
-static bool
-mxm_shadow_wmi(struct drm_device *dev, u8 version)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
-	struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
-	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-	acpi_status status;
-
-	if (!wmi_has_guid(WMI_WMMX_GUID)) {
-		MXM_DBG(dev, "WMMX GUID not found\n");
-		return false;
-	}
-
-	mxms_args[1] = wmi_wmmx_mxmi(dev, 0x00);
-	if (!mxms_args[1])
-		mxms_args[1] = wmi_wmmx_mxmi(dev, version);
-	if (!mxms_args[1])
-		return false;
-
-	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
-	if (ACPI_FAILURE(status)) {
-		MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
-		return false;
-	}
-
-	obj = retn.pointer;
-	if (obj->type == ACPI_TYPE_BUFFER) {
-		dev_priv->mxms = kmemdup(obj->buffer.pointer,
-					 obj->buffer.length, GFP_KERNEL);
-	}
-
-	kfree(obj);
-	return dev_priv->mxms != NULL;
-}
-#endif
-
-struct mxm_shadow_h {
-	const char *name;
-	bool (*exec)(struct drm_device *, u8 version);
-} _mxm_shadow[] = {
-	{ "ROM", mxm_shadow_rom },
-#if defined(CONFIG_ACPI)
-	{ "DSM", mxm_shadow_dsm },
-#endif
-#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
-	{ "WMI", mxm_shadow_wmi },
-#endif
-	{}
-};
-
-static int
-mxm_shadow(struct drm_device *dev, u8 version)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct mxm_shadow_h *shadow = _mxm_shadow;
-	do {
-		MXM_DBG(dev, "checking %s\n", shadow->name);
-		if (shadow->exec(dev, version)) {
-			if (mxms_valid(dev))
-				return 0;
-			kfree(dev_priv->mxms);
-			dev_priv->mxms = NULL;
-		}
-	} while ((++shadow)->name);
-	return -ENOENT;
-}
-
-int
-nouveau_mxm_init(struct drm_device *dev)
-{
-	u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
-	if (!mxm || !mxm[0]) {
-		MXM_MSG(dev, "no VBIOS data, nothing to do\n");
-		return 0;
-	}
-
-	MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
-
-	if (mxm_shadow(dev, mxm[0])) {
-		MXM_MSG(dev, "failed to locate valid SIS\n");
-#if 0
-		/* we should, perhaps, fall back to some kind of limited
-		 * mode here if the x86 vbios hasn't already done the
-		 * work for us (so we prevent loading with completely
-		 * whacked vbios tables).
-		 */
-		return -EINVAL;
-#else
-		return 0;
-#endif
-	}
-
-	MXM_MSG(dev, "MXMS Version %d.%d\n",
-		mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
-	mxms_foreach(dev, 0, NULL, NULL);
-
-	if (nouveau_mxmdcb)
-		mxm_dcb_sanitise(dev);
-	return 0;
-}
-
-void
-nouveau_mxm_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	kfree(dev_priv->mxms);
-	dev_priv->mxms = NULL;
-}
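
The shadowing paths in the file above (ROM, ACPI _DSM, WMI) all hand back a raw MXMS image that mxm_shadow() then validates via mxms_valid(): signature, version and checksum. Below is a stand-alone sketch of that validation using the layout visible in the deleted helpers (8-byte header, little-endian structure length at offset 6, byte-wise checksum over header plus structures); the function name and buffer handling are illustrative.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/* Validate a shadowed MXMS image: "MXM_" signature, known version,
 * and all header+structure bytes summing to zero modulo 256. */
bool mxms_image_valid(const uint8_t *mxms, size_t len)
{
	uint16_t version, structlen;
	uint8_t sum = 0;
	size_t i, total;

	if (len < 8)
		return false;
	if (mxms[0] != 'M' || mxms[1] != 'X' || mxms[2] != 'M' || mxms[3] != '_')
		return false;	/* 0x5f4d584d read little-endian is "MXM_" */

	version = (mxms[4] << 8) | mxms[5];
	if (version != 0x0200 && version != 0x0201 && version != 0x0300)
		return false;

	structlen = mxms[6] | (mxms[7] << 8);	/* little-endian u16 at offset 6 */
	total = 8 + structlen;			/* 8-byte header + structures */
	if (total > len)
		return false;

	for (i = 0; i < total; i++)
		sum += mxms[i];
	return sum == 0;	/* checksum byte makes the sum wrap to zero */
}
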
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
deleted file mode 100644
index 1ad3e6c8c432..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2007 Ben Skeggs.
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-
-int
-nouveau_notifier_init_channel(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_bo *ntfy = NULL;
-	uint32_t flags, ttmpl;
-	int ret;
-
-	if (nouveau_vram_notify) {
-		flags = NOUVEAU_GEM_DOMAIN_VRAM;
-		ttmpl = TTM_PL_FLAG_VRAM;
-	} else {
-		flags = NOUVEAU_GEM_DOMAIN_GART;
-		ttmpl = TTM_PL_FLAG_TT;
-	}
-
-	ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
-	if (ret)
-		return ret;
-
-	ret = nouveau_bo_pin(ntfy, ttmpl);
-	if (ret)
-		goto out_err;
-
-	ret = nouveau_bo_map(ntfy);
-	if (ret)
-		goto out_err;
-
-	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
-		if (ret)
-			goto out_err;
-	}
-
-	ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
-	if (ret)
-		goto out_err;
-
-	chan->notifier_bo = ntfy;
-out_err:
-	if (ret) {
-		nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
-		drm_gem_object_unreference_unlocked(ntfy->gem);
-	}
-
-	return ret;
-}
-
-void
-nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	if (!chan->notifier_bo)
-		return;
-
-	nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
-	nouveau_bo_unmap(chan->notifier_bo);
-	mutex_lock(&dev->struct_mutex);
-	nouveau_bo_unpin(chan->notifier_bo);
-	mutex_unlock(&dev->struct_mutex);
-	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
-	drm_mm_takedown(&chan->notifier_heap);
-}
-
-static void
-nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
-			     struct nouveau_gpuobj *gpuobj)
-{
-	NV_DEBUG(dev, "\n");
-
-	if (gpuobj->priv)
-		drm_mm_put_block(gpuobj->priv);
-}
-
-int
-nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
-		       int size, uint32_t start, uint32_t end,
-		       uint32_t *b_offset)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *nobj = NULL;
-	struct drm_mm_node *mem;
-	uint64_t offset;
-	int target, ret;
-
-	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
-					  start, end, 0);
-	if (mem)
-		mem = drm_mm_get_block_range(mem, size, 0, start, end);
-	if (!mem) {
-		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
-		return -ENOMEM;
-	}
-
-	if (dev_priv->card_type < NV_50) {
-		if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
-			target = NV_MEM_TARGET_VRAM;
-		else
-			target = NV_MEM_TARGET_GART;
-		offset  = chan->notifier_bo->bo.offset;
-	} else {
-		target = NV_MEM_TARGET_VM;
-		offset = chan->notifier_vma.offset;
-	}
-	offset += mem->start;
-
-	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
-				     mem->size, NV_MEM_ACCESS_RW, target,
-				     &nobj);
-	if (ret) {
-		drm_mm_put_block(mem);
-		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
-		return ret;
-	}
-	nobj->dtor = nouveau_notifier_gpuobj_dtor;
-	nobj->priv = mem;
-
-	ret = nouveau_ramht_insert(chan, handle, nobj);
-	nouveau_gpuobj_ref(NULL, &nobj);
-	if (ret) {
-		drm_mm_put_block(mem);
-		NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
-		return ret;
-	}
-
-	*b_offset = mem->start;
-	return 0;
-}
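
The removed nouveau_notifier_alloc() carved a slot out of a per-channel heap and built a ctxdma covering the backing buffer's base plus the slot's start, returning the slot offset to the caller; the base is the BO's VRAM/GART offset before NV50 and the per-channel virtual address from NV50 on. A minimal sketch of that address math follows; all values and names are made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bo_offset  = 0x0000000020000000ull;	/* backing buffer base (illustrative) */
	uint32_t slot_start = 0x40;			/* slot offset inside the notifier heap */
	uint32_t slot_size  = 0x20;

	/* the ctxdma spans [base + slot_start, base + slot_start + slot_size) */
	uint64_t dma_base = bo_offset + slot_start;

	printf("ctxdma: base 0x%010llx size 0x%x, b_offset 0x%x\n",
	       (unsigned long long)dma_base,
	       (unsigned)slot_size, (unsigned)slot_start);
	return 0;
}
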
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 4946d308a362..4fe883c54918 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -24,14 +24,15 @@
 
 #include <drm/drmP.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_pm.h"
 
 static u8 *
 nouveau_perf_table(struct drm_device *dev, u8 *ver)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	struct bit_entry P;
 
 	if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
@@ -87,7 +88,7 @@ u8 *
 nouveau_perf_rammap(struct drm_device *dev, u32 freq,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct bit_entry P;
 	u8 *perf, i = 0;
 
@@ -114,8 +115,8 @@ nouveau_perf_rammap(struct drm_device *dev, u32 freq,
 		return NULL;
 	}
 
-	if (dev_priv->chipset == 0x49 ||
-	    dev_priv->chipset == 0x4b)
+	if (nv_device(drm->device)->chipset == 0x49 ||
+	    nv_device(drm->device)->chipset == 0x4b)
 		freq /= 2;
 
 	while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
@@ -142,12 +143,13 @@ nouveau_perf_rammap(struct drm_device *dev, u32 freq,
 u8 *
 nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	u8 strap, hdr, cnt;
 	u8 *rammap;
 
-	strap = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
+	strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
 	if (bios->ram_restrict_tbl_ptr)
 		strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
 
@@ -161,8 +163,8 @@ nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
 u8 *
 nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	struct bit_entry P;
 	u8 *perf, *timing = NULL;
 	u8 i = 0, hdr, cnt;
@@ -202,20 +204,21 @@ nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
 static void
 legacy_perf_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	char *perf, *entry, *bmp = &bios->data[bios->offset];
 	int headerlen, use_straps;
 
 	if (bmp[5] < 0x5 || bmp[6] < 0x14) {
-		NV_DEBUG(dev, "BMP version too old for perf\n");
+		NV_DEBUG(drm, "BMP version too old for perf\n");
 		return;
 	}
 
 	perf = ROMPTR(dev, bmp[0x73]);
 	if (!perf) {
-		NV_DEBUG(dev, "No memclock table pointer found.\n");
+		NV_DEBUG(drm, "No memclock table pointer found.\n");
 		return;
 	}
 
@@ -231,13 +234,13 @@ legacy_perf_init(struct drm_device *dev)
 		headerlen = (use_straps ? 8 : 2);
 		break;
 	default:
-		NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]);
+		NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
 		return;
 	}
 
 	entry = perf + headerlen;
 	if (use_straps)
-		entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
+		entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
 
 	sprintf(pm->perflvl[0].name, "performance_level_0");
 	pm->perflvl[0].memory = ROM16(entry[0]) * 20;
@@ -247,7 +250,7 @@ legacy_perf_init(struct drm_device *dev)
 static void
 nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct bit_entry P;
 	u8 *vmap;
 	int id;
@@ -258,7 +261,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	/* boards using voltage table version <0x40 store the voltage
 	 * level directly in the perflvl entry as a multiple of 10mV
 	 */
-	if (dev_priv->engine.pm.voltage.version < 0x40) {
+	if (drm->pm->voltage.version < 0x40) {
 		perflvl->volt_min = id * 10000;
 		perflvl->volt_max = perflvl->volt_min;
 		return;
@@ -268,14 +271,14 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	 * vbios table containing a min/max voltage value for the perflvl
 	 */
 	if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
-		NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n",
+		NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
 			 P.version, P.length);
 		return;
 	}
 
 	vmap = ROMPTR(dev, P.data[32]);
 	if (!vmap) {
-		NV_DEBUG(dev, "volt map table pointer invalid\n");
+		NV_DEBUG(drm, "volt map table pointer invalid\n");
 		return;
 	}
 
@@ -289,9 +292,9 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 void
 nouveau_perf_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nvbios *bios = &drm->vbios;
 	u8 *perf, ver, hdr, cnt, len;
 	int ret, vid, i = -1;
 
@@ -301,8 +304,6 @@ nouveau_perf_init(struct drm_device *dev)
 	}
 
 	perf = nouveau_perf_table(dev, &ver);
-	if (ver >= 0x20 && ver < 0x40)
-		pm->fan.pwm_divisor = ROM16(perf[6]);
 
 	while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
 		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
@@ -328,8 +329,8 @@ nouveau_perf_init(struct drm_device *dev)
 			perflvl->shader = ROM16(perf[6]) * 1000;
 			perflvl->core = perflvl->shader;
 			perflvl->core += (signed char)perf[8] * 1000;
-			if (dev_priv->chipset == 0x49 ||
-			    dev_priv->chipset == 0x4b)
+			if (nv_device(drm->device)->chipset == 0x49 ||
+			    nv_device(drm->device)->chipset == 0x4b)
 				perflvl->memory = ROM16(perf[11]) * 1000;
 			else
 				perflvl->memory = ROM16(perf[11]) * 2000;
@@ -356,7 +357,7 @@ nouveau_perf_init(struct drm_device *dev)
 #define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
 			perflvl->fanspeed = 0; /*XXX*/
 			perflvl->volt_min = perf[2];
-			if (dev_priv->card_type == NV_50) {
+			if (nv_device(drm->device)->card_type == NV_50) {
 				perflvl->core   = subent(0);
 				perflvl->shader = subent(1);
 				perflvl->memory = subent(2);
@@ -382,7 +383,7 @@ nouveau_perf_init(struct drm_device *dev)
 		if (pm->voltage.supported && perflvl->volt_min) {
 			vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
 			if (vid < 0) {
-				NV_DEBUG(dev, "perflvl %d, bad vid\n", i);
+				NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
 				continue;
 			}
 		}
@@ -391,7 +392,7 @@ nouveau_perf_init(struct drm_device *dev)
 		ret = nouveau_mem_timing_calc(dev, perflvl->memory,
 					          &perflvl->timing);
 		if (ret) {
-			NV_DEBUG(dev, "perflvl %d, bad timing: %d\n", i, ret);
+			NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
 			continue;
 		}
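
Earlier in this file's diff, nouveau_perf_ramcfg() derives the memory strap from bits 5:2 of register 0x101000 and optionally remaps it through the VBIOS ram_restrict table before picking a per-strap configuration block. A stand-alone sketch of that selection follows; the register value is passed in as a plain argument, and the final indexing into the rammap entry is an assumption, since the function body is not fully shown in this hunk.

#include <stdint.h>
#include <stddef.h>

/* Pick the per-strap ramcfg block out of a rammap entry.  Table contents
 * and the indexing scheme are illustrative. */
static uint8_t *select_ramcfg(uint32_t reg_101000, const uint8_t *remap_tbl,
			      uint8_t *rammap, uint8_t hdr, uint8_t cnt, uint8_t len)
{
	uint8_t strap = (reg_101000 & 0x0000003c) >> 2;

	if (remap_tbl)			/* the VBIOS may remap the raw strap */
		strap = remap_tbl[strap];

	if (!rammap || strap >= cnt)
		return NULL;		/* no ramcfg entry for this strap */

	return rammap + hdr + (strap * len);	/* per-strap configuration block */
}
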
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 7cf95b20b7a4..0bf64c90aa20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -22,12 +22,6 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_pm.h"
-#include "nouveau_gpio.h"
-
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
 #endif
@@ -35,85 +29,41 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 
-static int
-nouveau_pwmfan_get(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct gpio_func gpio;
-	u32 divs, duty;
-	int ret;
+#include <drm/drmP.h>
 
-	if (!pm->pwm_get)
-		return -ENODEV;
+#include "nouveau_drm.h"
+#include "nouveau_pm.h"
 
-	ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
-	if (ret == 0) {
-		ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
-		if (ret == 0 && divs) {
-			divs = max(divs, duty);
-			if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
-				duty = divs - duty;
-			return (duty * 100) / divs;
-		}
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
 
-		return nouveau_gpio_func_get(dev, gpio.func) * 100;
-	}
+MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
+static char *nouveau_perflvl;
+module_param_named(perflvl, nouveau_perflvl, charp, 0400);
 
-	return -ENODEV;
-}
-
-static int
-nouveau_pwmfan_set(struct drm_device *dev, int percent)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct gpio_func gpio;
-	u32 divs, duty;
-	int ret;
-
-	if (!pm->pwm_set)
-		return -ENODEV;
-
-	ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
-	if (ret == 0) {
-		divs = pm->fan.pwm_divisor;
-		if (pm->fan.pwm_freq) {
-			/*XXX: PNVIO clock more than likely... */
-			divs = 135000 / pm->fan.pwm_freq;
-			if (dev_priv->chipset < 0xa3)
-				divs /= 4;
-		}
-
-		duty = ((divs * percent) + 99) / 100;
-		if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
-			duty = divs - duty;
-
-		ret = pm->pwm_set(dev, gpio.line, divs, duty);
-		if (!ret)
-			pm->fan.percent = percent;
-		return ret;
-	}
-
-	return -ENODEV;
-}
+MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
+static int nouveau_perflvl_wr;
+module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
 
 static int
 nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 		       struct nouveau_pm_level *a, struct nouveau_pm_level *b)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm);
 	int ret;
 
 	/*XXX: not on all boards, we should control based on temperature
 	 *     on recent boards..  or maybe on some other factor we don't
 	 *     know about?
 	 */
-	if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
-		ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
+	if (therm && therm->fan_set &&
+		a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
+		ret = therm->fan_set(therm, perflvl->fanspeed);
 		if (ret && ret != -ENODEV) {
-			NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
+			NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
 			return ret;
 		}
 	}
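
The deleted nouveau_pwmfan_get()/nouveau_pwmfan_set() pair above converted between a fan-speed percentage and a PWM duty value against a divisor, inverting the duty on boards whose fan GPIO is active-low; after this patch that logic sits behind therm->fan_get()/fan_set(). Below is a stand-alone sketch of the conversion; the divisor value and the invert-flag handling are illustrative.

#include <stdio.h>
#include <stdint.h>

/* percent -> duty, rounding up so small non-zero requests still spin the fan */
static uint32_t percent_to_duty(uint32_t divs, int percent, int invert)
{
	uint32_t duty = ((divs * (uint32_t)percent) + 99) / 100;
	if (invert)
		duty = divs - duty;	/* active-low PWM line */
	return duty;
}

/* duty -> percent, undoing the inversion first */
static int duty_to_percent(uint32_t divs, uint32_t duty, int invert)
{
	if (!divs)
		return -1;
	if (invert)
		duty = divs - duty;
	return (int)((duty * 100) / divs);
}

int main(void)
{
	uint32_t divs = 0x21f;	/* example divisor, e.g. from a perf/therm table */
	uint32_t duty = percent_to_duty(divs, 40, 0);

	printf("duty %u/%u -> %d%%\n", duty, divs, duty_to_percent(divs, duty, 0));
	return 0;
}
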
@@ -122,7 +72,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 		if (perflvl->volt_min && b->volt_min > a->volt_min) {
 			ret = pm->voltage_set(dev, perflvl->volt_min);
 			if (ret) {
-				NV_ERROR(dev, "voltage set failed: %d\n", ret);
+				NV_ERROR(drm, "voltage set failed: %d\n", ret);
 				return ret;
 			}
 		}
@@ -134,8 +84,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 static int
 nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	void *state;
 	int ret;
 
@@ -171,8 +120,9 @@ error:
 void
 nouveau_pm_trigger(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_timer *ptimer = nouveau_timer(drm->device);
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct nouveau_pm_profile *profile = NULL;
 	struct nouveau_pm_level *perflvl = NULL;
 	int ret;
@@ -194,24 +144,22 @@ nouveau_pm_trigger(struct drm_device *dev)
 
 	/* change perflvl, if necessary */
 	if (perflvl != pm->cur) {
-		struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-		u64 time0 = ptimer->read(dev);
+		u64 time0 = ptimer->read(ptimer);
 
-		NV_INFO(dev, "setting performance level: %d", perflvl->id);
+		NV_INFO(drm, "setting performance level: %d", perflvl->id);
 		ret = nouveau_pm_perflvl_set(dev, perflvl);
 		if (ret)
-			NV_INFO(dev, "> reclocking failed: %d\n\n", ret);
+			NV_INFO(drm, "> reclocking failed: %d\n\n", ret);
 
-		NV_INFO(dev, "> reclocking took %lluns\n\n",
-			     ptimer->read(dev) - time0);
+		NV_INFO(drm, "> reclocking took %lluns\n\n",
+			     ptimer->read(ptimer) - time0);
 	}
 }
 
 static struct nouveau_pm_profile *
 profile_find(struct drm_device *dev, const char *string)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct nouveau_pm_profile *profile;
 
 	list_for_each_entry(profile, &pm->profiles, head) {
@@ -225,8 +173,7 @@ profile_find(struct drm_device *dev, const char *string)
 static int
 nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct nouveau_pm_profile *ac = NULL, *dc = NULL;
 	char string[16], *cur = string, *ptr;
 
@@ -279,8 +226,9 @@ const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
 static int
 nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	int ret;
 
 	memset(perflvl, 0, sizeof(*perflvl));
@@ -299,9 +247,11 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 		}
 	}
 
-	ret = nouveau_pwmfan_get(dev);
-	if (ret > 0)
-		perflvl->fanspeed = ret;
+	if (therm && therm->fan_get) {
+		ret = therm->fan_get(therm);
+		if (ret >= 0)
+			perflvl->fanspeed = ret;
+	}
 
 	nouveau_mem_timing_read(dev, &perflvl->timing);
 	return 0;
@@ -362,8 +312,7 @@ static ssize_t
 nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct nouveau_pm_level cur;
 	int len = PAGE_SIZE, ret;
 	char *ptr = buf;
@@ -398,8 +347,8 @@ static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
 static int
 nouveau_sysfs_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct device *d = &dev->pdev->dev;
 	int ret, i;
 
@@ -418,7 +367,7 @@ nouveau_sysfs_init(struct drm_device *dev)
 
 		ret = device_create_file(d, &perflvl->dev_attr);
 		if (ret) {
-			NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n",
+			NV_ERROR(drm, "failed pervlvl %d sysfs: %d\n",
 				 perflvl->id, i);
 			perflvl->dev_attr.attr.name = NULL;
 			nouveau_pm_fini(dev);
@@ -432,8 +381,7 @@ nouveau_sysfs_init(struct drm_device *dev)
 static void
 nouveau_sysfs_fini(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct device *d = &dev->pdev->dev;
 	int i;
 
@@ -453,10 +401,10 @@ static ssize_t
 nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000);
+	return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000);
 }
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
 						  NULL, 0);
@@ -465,28 +413,25 @@ static ssize_t
 nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
 }
 static ssize_t
 nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
 						const char *buf, size_t count)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
 		return count;
 
-	temp->down_clock = value/1000;
-
-	nouveau_temp_safety_checks(dev);
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
 
 	return count;
 }
@@ -499,11 +444,11 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
 							char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
 }
 static ssize_t
 nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
@@ -511,17 +456,14 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
 								size_t count)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
 		return count;
 
-	temp->critical = value/1000;
-
-	nouveau_temp_safety_checks(dev);
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
 
 	return count;
 }
@@ -553,47 +495,62 @@ nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
 			      char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-	struct gpio_func gpio;
-	u32 cycles, cur, prev;
-	u64 start;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
+}
+static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+			  NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_enable(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	int ret;
 
-	ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
-	if (ret)
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
+	if (ret < 0)
 		return ret;
 
-	/* Monitor the GPIO input 0x3b for 250ms.
-	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
-	 * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
-	 */
-	start = ptimer->read(dev);
-	prev = nouveau_gpio_sense(dev, 0, gpio.line);
-	cycles = 0;
-	do {
-		cur = nouveau_gpio_sense(dev, 0, gpio.line);
-		if (prev != cur) {
-			cycles++;
-			prev = cur;
-		}
+	return sprintf(buf, "%i\n", ret);
+}
 
-		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
-	} while (ptimer->read(dev) - start < 250000000);
+static ssize_t
+nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
 
-	/* interpolate to get rpm */
-	return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
+	if (ret)
+		return ret;
+	else
+		return count;
 }
-static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
-			  NULL, 0);
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_enable,
+			  nouveau_hwmon_set_pwm1_enable, 0);
 
 static ssize_t
-nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
+nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	int ret;
 
-	ret = nouveau_pwmfan_get(dev);
+	ret = therm->fan_get(therm);
 	if (ret < 0)
 		return ret;
 
@@ -601,12 +558,12 @@ nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
 }
 
 static ssize_t
-nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
+nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
 		       const char *buf, size_t count)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	int ret = -ENODEV;
 	long value;
 
@@ -616,103 +573,96 @@ nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
 	if (kstrtol(buf, 10, &value) == -EINVAL)
 		return -EINVAL;
 
-	if (value < pm->fan.min_duty)
-		value = pm->fan.min_duty;
-	if (value > pm->fan.max_duty)
-		value = pm->fan.max_duty;
-
-	ret = nouveau_pwmfan_set(dev, value);
+	ret = therm->fan_set(therm, value);
 	if (ret)
 		return ret;
 
 	return count;
 }
 
-static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm0,
-			  nouveau_hwmon_set_pwm0, 0);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1,
+			  nouveau_hwmon_set_pwm1, 0);
 
 static ssize_t
-nouveau_hwmon_get_pwm0_min(struct device *d,
+nouveau_hwmon_get_pwm1_min(struct device *d,
 			   struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
+	if (ret < 0)
+		return ret;
 
-	return sprintf(buf, "%i\n", pm->fan.min_duty);
+	return sprintf(buf, "%i\n", ret);
 }
 
 static ssize_t
-nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
+nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
 			   const char *buf, size_t count)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	long value;
+	int ret;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
 		return -EINVAL;
 
-	if (value < 0)
-		value = 0;
-
-	if (pm->fan.max_duty - value < 10)
-		value = pm->fan.max_duty - 10;
-
-	if (value < 10)
-		pm->fan.min_duty = 10;
-	else
-		pm->fan.min_duty = value;
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
+	if (ret < 0)
+		return ret;
 
 	return count;
 }
 
-static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm0_min,
-			  nouveau_hwmon_set_pwm0_min, 0);
+static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_min,
+			  nouveau_hwmon_set_pwm1_min, 0);
 
 static ssize_t
-nouveau_hwmon_get_pwm0_max(struct device *d,
+nouveau_hwmon_get_pwm1_max(struct device *d,
 			   struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
+	if (ret < 0)
+		return ret;
 
-	return sprintf(buf, "%i\n", pm->fan.max_duty);
+	return sprintf(buf, "%i\n", ret);
 }
 
 static ssize_t
-nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
+nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
 			   const char *buf, size_t count)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
 	long value;
+	int ret;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
 		return -EINVAL;
 
-	if (value < 0)
-		value = 0;
-
-	if (value - pm->fan.min_duty < 10)
-		value = pm->fan.min_duty + 10;
-
-	if (value > 100)
-		pm->fan.max_duty = 100;
-	else
-		pm->fan.max_duty = value;
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
+	if (ret < 0)
+		return ret;
 
 	return count;
 }
 
-static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm0_max,
-			  nouveau_hwmon_set_pwm0_max, 0);
+static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_max,
+			  nouveau_hwmon_set_pwm1_max, 0);
 
 static struct attribute *hwmon_attributes[] = {
 	&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -727,9 +677,10 @@ static struct attribute *hwmon_fan_rpm_attributes[] = {
 	NULL
 };
 static struct attribute *hwmon_pwm_fan_attributes[] = {
-	&sensor_dev_attr_pwm0.dev_attr.attr,
-	&sensor_dev_attr_pwm0_min.dev_attr.attr,
-	&sensor_dev_attr_pwm0_max.dev_attr.attr,
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm1_min.dev_attr.attr,
+	&sensor_dev_attr_pwm1_max.dev_attr.attr,
 	NULL
 };
 
@@ -747,20 +698,22 @@ static const struct attribute_group hwmon_pwm_fan_attrgroup = {
 static int
 nouveau_hwmon_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 	struct device *hwmon_dev;
 	int ret = 0;
 
-	if (!pm->temp_get)
+	if (!therm || !therm->temp_get || !therm->attr_get ||
+		!therm->attr_set || therm->temp_get(therm) < 0)
 		return -ENODEV;
 
 	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
 	if (IS_ERR(hwmon_dev)) {
 		ret = PTR_ERR(hwmon_dev);
-		NV_ERROR(dev,
-			"Unable to register hwmon device: %d\n", ret);
+		NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
 		return ret;
 	}
 	dev_set_drvdata(hwmon_dev, dev);
@@ -776,7 +729,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 	/*XXX: incorrect, need better detection for this, some boards have
 	 *     the gpio entries for pwm fan control even when there's no
 	 *     actual fan connected to it... therm table? */
-	if (nouveau_pwmfan_get(dev) >= 0) {
+	if (therm->fan_get && therm->fan_get(therm) >= 0) {
 		ret = sysfs_create_group(&dev->pdev->dev.kobj,
 					 &hwmon_pwm_fan_attrgroup);
 		if (ret)
@@ -784,7 +737,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 	}
 
 	/* if the card can read the fan rpm */
-	if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
+	if (therm->fan_sense(therm) >= 0) {
 		ret = sysfs_create_group(&dev->pdev->dev.kobj,
 					 &hwmon_fan_rpm_attrgroup);
 		if (ret)
@@ -796,7 +749,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 	return 0;
 
 error:
-	NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
+	NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
 	hwmon_device_unregister(hwmon_dev);
 	pm->hwmon = NULL;
 	return ret;
@@ -810,8 +763,7 @@ static void
 nouveau_hwmon_fini(struct drm_device *dev)
 {
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 
 	if (pm->hwmon) {
 		sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
@@ -829,16 +781,15 @@ nouveau_hwmon_fini(struct drm_device *dev)
 static int
 nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
 {
-	struct drm_nouveau_private *dev_priv =
-		container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
+	struct nouveau_drm *drm = nouveau_drm(pm->dev);
 	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
 
 	if (strcmp(entry->device_class, "ac_adapter") == 0) {
 		bool ac = power_supply_is_system_supplied();
 
-		NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
-		nouveau_pm_trigger(dev);
+		NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC");
+		nouveau_pm_trigger(pm->dev);
 	}
 
 	return NOTIFY_OK;
@@ -848,19 +799,67 @@ nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
 int
 nouveau_pm_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm;
 	char info[256];
 	int ret, i;
 
+	pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
+	if (!pm)
+		return -ENOMEM;
+
+	pm->dev = dev;
+
+	if (device->card_type < NV_40) {
+		pm->clocks_get = nv04_pm_clocks_get;
+		pm->clocks_pre = nv04_pm_clocks_pre;
+		pm->clocks_set = nv04_pm_clocks_set;
+		if (nouveau_gpio(drm->device)) {
+			pm->voltage_get = nouveau_voltage_gpio_get;
+			pm->voltage_set = nouveau_voltage_gpio_set;
+		}
+	} else
+	if (device->card_type < NV_50) {
+		pm->clocks_get = nv40_pm_clocks_get;
+		pm->clocks_pre = nv40_pm_clocks_pre;
+		pm->clocks_set = nv40_pm_clocks_set;
+		pm->voltage_get = nouveau_voltage_gpio_get;
+		pm->voltage_set = nouveau_voltage_gpio_set;
+	} else
+	if (device->card_type < NV_C0) {
+		if (device->chipset <  0xa3 ||
+		    device->chipset == 0xaa ||
+		    device->chipset == 0xac) {
+			pm->clocks_get = nv50_pm_clocks_get;
+			pm->clocks_pre = nv50_pm_clocks_pre;
+			pm->clocks_set = nv50_pm_clocks_set;
+		} else {
+			pm->clocks_get = nva3_pm_clocks_get;
+			pm->clocks_pre = nva3_pm_clocks_pre;
+			pm->clocks_set = nva3_pm_clocks_set;
+		}
+		pm->voltage_get = nouveau_voltage_gpio_get;
+		pm->voltage_set = nouveau_voltage_gpio_set;
+	} else
+	if (device->card_type < NV_E0) {
+		pm->clocks_get = nvc0_pm_clocks_get;
+		pm->clocks_pre = nvc0_pm_clocks_pre;
+		pm->clocks_set = nvc0_pm_clocks_set;
+		pm->voltage_get = nouveau_voltage_gpio_get;
+		pm->voltage_set = nouveau_voltage_gpio_set;
+	}
+
+
 	/* parse aux tables from vbios */
 	nouveau_volt_init(dev);
-	nouveau_temp_init(dev);
+
+	INIT_LIST_HEAD(&pm->profiles);
 
 	/* determine current ("boot") performance level */
 	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
 	if (ret) {
-		NV_ERROR(dev, "failed to determine boot perflvl\n");
+		NV_ERROR(drm, "failed to determine boot perflvl\n");
 		return ret;
 	}
 
@@ -868,7 +867,6 @@ nouveau_pm_init(struct drm_device *dev)
 	strncpy(pm->boot.profile.name, "boot", 4);
 	pm->boot.profile.func = &nouveau_pm_static_profile_func;
 
-	INIT_LIST_HEAD(&pm->profiles);
 	list_add(&pm->boot.profile.head, &pm->profiles);
 
 	pm->profile_ac = &pm->boot.profile;
@@ -880,22 +878,19 @@ nouveau_pm_init(struct drm_device *dev)
 	nouveau_perf_init(dev);
 
 	/* display available performance levels */
-	NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
+	NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
 	for (i = 0; i < pm->nr_perflvl; i++) {
 		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
-		NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info);
+		NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
 	}
 
 	nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
-	NV_INFO(dev, "c:%s", info);
+	NV_INFO(drm, "c:%s", info);
 
 	/* switch performance levels now if requested */
 	if (nouveau_perflvl != NULL)
 		nouveau_pm_profile_set(dev, nouveau_perflvl);
 
-	/* determine the current fan speed */
-	pm->fan.percent = nouveau_pwmfan_get(dev);
-
 	nouveau_sysfs_init(dev);
 	nouveau_hwmon_init(dev);
 #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
@@ -909,8 +904,7 @@ nouveau_pm_init(struct drm_device *dev)
 void
 nouveau_pm_fini(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct nouveau_pm_profile *profile, *tmp;
 
 	list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
@@ -921,7 +915,6 @@ nouveau_pm_fini(struct drm_device *dev)
 	if (pm->cur != &pm->boot)
 		nouveau_pm_perflvl_set(dev, &pm->boot);
 
-	nouveau_temp_fini(dev);
 	nouveau_perf_fini(dev);
 	nouveau_volt_fini(dev);
 
@@ -930,13 +923,15 @@ nouveau_pm_fini(struct drm_device *dev)
 #endif
 	nouveau_hwmon_fini(dev);
 	nouveau_sysfs_fini(dev);
+
+	nouveau_drm(dev)->pm = NULL;
+	kfree(pm);
 }
 
 void
 nouveau_pm_resume(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct nouveau_pm_level *perflvl;
 
 	if (!pm->cur || pm->cur == &pm->boot)
@@ -945,5 +940,4 @@ nouveau_pm_resume(struct drm_device *dev)
 	perflvl = pm->cur;
 	pm->cur = &pm->boot;
 	nouveau_pm_perflvl_set(dev, perflvl);
-	nouveau_pwmfan_set(dev, pm->fan.percent);
 }
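
The hwmon and perflvl paths above no longer carry their own fan plumbing; they funnel through the therm subdev instead of the removed nouveau_pwmfan_get()/nouveau_pwmfan_set() helpers. A minimal sketch of the new call pattern follows (illustrative only, not part of the patch; it assumes the nouveau_drm()/nouveau_therm() accessors and the fan_get()/fan_set() hooks exactly as used in the hunks above, plus the headers nouveau_pm.c now includes):

/* illustrative sketch, not part of the patch: read the current fan duty
 * via the therm subdev, falling back to programming a fixed duty when
 * the read fails.  fan_get() returns a duty in percent or -errno and
 * fan_set() takes a percentage, matching the handlers above.
 */
static int
example_fan_duty(struct drm_device *dev, int fallback_percent)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_therm *therm = nouveau_therm(drm->device);
	int ret;

	if (!therm || !therm->fan_get || !therm->fan_set)
		return -ENODEV;

	ret = therm->fan_get(therm);
	if (ret < 0)
		ret = therm->fan_set(therm, fallback_percent);
	return ret;
}

The same shape applies to the threshold attributes: attr_get()/attr_set() with a NOUVEAU_THERM_ATTR_* selector replace the old open-coded threshold_temp fields.
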
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 07cac72c72b4..73b789c230a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -25,6 +25,165 @@
 #ifndef __NOUVEAU_PM_H__
 #define __NOUVEAU_PM_H__
 
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+
+struct nouveau_pm_voltage_level {
+	u32 voltage; /* microvolts */
+	u8  vid;
+};
+
+struct nouveau_pm_voltage {
+	bool supported;
+	u8 version;
+	u8 vid_mask;
+
+	struct nouveau_pm_voltage_level *level;
+	int nr_level;
+};
+
+/* Exclusive upper limits */
+#define NV_MEM_CL_DDR2_MAX 8
+#define NV_MEM_WR_DDR2_MAX 9
+#define NV_MEM_CL_DDR3_MAX 17
+#define NV_MEM_WR_DDR3_MAX 17
+#define NV_MEM_CL_GDDR3_MAX 16
+#define NV_MEM_WR_GDDR3_MAX 18
+#define NV_MEM_CL_GDDR5_MAX 21
+#define NV_MEM_WR_GDDR5_MAX 20
+
+struct nouveau_pm_memtiming {
+	int id;
+
+	u32 reg[9];
+	u32 mr[4];
+
+	u8 tCWL;
+
+	u8 odt;
+	u8 drive_strength;
+};
+
+struct nouveau_pm_tbl_header {
+	u8 version;
+	u8 header_len;
+	u8 entry_cnt;
+	u8 entry_len;
+};
+
+struct nouveau_pm_tbl_entry {
+	u8 tWR;
+	u8 tWTR;
+	u8 tCL;
+	u8 tRC;
+	u8 empty_4;
+	u8 tRFC;	/* Byte 5 */
+	u8 empty_6;
+	u8 tRAS;	/* Byte 7 */
+	u8 empty_8;
+	u8 tRP;		/* Byte 9 */
+	u8 tRCDRD;
+	u8 tRCDWR;
+	u8 tRRD;
+	u8 tUNK_13;
+	u8 RAM_FT1;		/* 14, a bitmask of random RAM features */
+	u8 empty_15;
+	u8 tUNK_16;
+	u8 empty_17;
+	u8 tUNK_18;
+	u8 tCWL;
+	u8 tUNK_20, tUNK_21;
+};
+
+struct nouveau_pm_profile;
+struct nouveau_pm_profile_func {
+	void (*destroy)(struct nouveau_pm_profile *);
+	void (*init)(struct nouveau_pm_profile *);
+	void (*fini)(struct nouveau_pm_profile *);
+	struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
+};
+
+struct nouveau_pm_profile {
+	const struct nouveau_pm_profile_func *func;
+	struct list_head head;
+	char name[8];
+};
+
+#define NOUVEAU_PM_MAX_LEVEL 8
+struct nouveau_pm_level {
+	struct nouveau_pm_profile profile;
+	struct device_attribute dev_attr;
+	char name[32];
+	int id;
+
+	struct nouveau_pm_memtiming timing;
+	u32 memory;
+	u16 memscript;
+
+	u32 core;
+	u32 shader;
+	u32 rop;
+	u32 copy;
+	u32 daemon;
+	u32 vdec;
+	u32 dom6;
+	u32 unka0;	/* nva3:nvc0 */
+	u32 hub01;	/* nvc0- */
+	u32 hub06;	/* nvc0- */
+	u32 hub07;	/* nvc0- */
+
+	u32 volt_min; /* microvolts */
+	u32 volt_max;
+	u8  fanspeed;
+};
+
+struct nouveau_pm_temp_sensor_constants {
+	u16 offset_constant;
+	s16 offset_mult;
+	s16 offset_div;
+	s16 slope_mult;
+	s16 slope_div;
+};
+
+struct nouveau_pm_threshold_temp {
+	s16 critical;
+	s16 down_clock;
+};
+
+struct nouveau_pm {
+	struct drm_device *dev;
+
+	struct nouveau_pm_voltage voltage;
+	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
+	int nr_perflvl;
+	struct nouveau_pm_temp_sensor_constants sensor_constants;
+	struct nouveau_pm_threshold_temp threshold_temp;
+
+	struct nouveau_pm_profile *profile_ac;
+	struct nouveau_pm_profile *profile_dc;
+	struct nouveau_pm_profile *profile;
+	struct list_head profiles;
+
+	struct nouveau_pm_level boot;
+	struct nouveau_pm_level *cur;
+
+	struct device *hwmon;
+	struct notifier_block acpi_nb;
+
+	int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
+	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
+	int (*clocks_set)(struct drm_device *, void *);
+
+	int (*voltage_get)(struct drm_device *);
+	int (*voltage_set)(struct drm_device *, int voltage);
+};
+
+static inline struct nouveau_pm *
+nouveau_pm(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->pm;
+}
+
 struct nouveau_mem_exec_func {
 	struct drm_device *dev;
 	void (*precharge)(struct nouveau_mem_exec_func *);
@@ -99,11 +258,26 @@ int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
 void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
 int nvc0_pm_clocks_set(struct drm_device *, void *);
 
-/* nouveau_temp.c */
-void nouveau_temp_init(struct drm_device *dev);
-void nouveau_temp_fini(struct drm_device *dev);
-void nouveau_temp_safety_checks(struct drm_device *dev);
-int nv40_temp_get(struct drm_device *dev);
-int nv84_temp_get(struct drm_device *dev);
+/* nouveau_mem.c */
+int  nouveau_mem_timing_calc(struct drm_device *, u32 freq,
+			     struct nouveau_pm_memtiming *);
+void nouveau_mem_timing_read(struct drm_device *,
+			     struct nouveau_pm_memtiming *);
+
+static inline int
+nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
+	      int *N, int *fN, int *M, int *P)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_clock *clk = nouveau_clock(device);
+	struct nouveau_pll_vals pv;
+	int ret;
+
+	ret = clk->pll_calc(clk, pll, freq, &pv);
+	*N = pv.N1;
+	*M = pv.M1;
+	*P = pv.log2P;
+	return ret;
+}
 
 #endif
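
The clock hooks declared in struct nouveau_pm above split calculation from commit: clocks_pre() returns an opaque, preallocated state object and clocks_set() applies it. A minimal sketch of how the two compose (illustrative only, not the patch's nouveau_pm_perflvl_set(); it assumes the nouveau_pm() accessor defined above and ERR_PTR-style failure reporting from the _pre hook):

/* sketch: drive one reclock through the per-generation hooks declared
 * in struct nouveau_pm.  error handling is deliberately minimal and the
 * ERR_PTR convention for clocks_pre() failures is an assumption here.
 */
static int
example_reclock(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct nouveau_pm *pm = nouveau_pm(dev);
	void *state;

	if (!pm->clocks_pre || !pm->clocks_set)
		return -ENODEV;

	state = pm->clocks_pre(dev, perflvl);	/* calculate/preallocate */
	if (IS_ERR(state))
		return PTR_ERR(state);

	return pm->clocks_set(dev, state);	/* commit to the hardware */
}

Splitting the hooks this way keeps the allocation-heavy work out of the final commit step, which is why the struct exposes both callbacks rather than a single reclock entry point.
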
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index aef7181415a8..366462cf8a2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,13 +22,12 @@
  * Authors: Dave Airlie
  */
 
-#include <drm/drmP.h>
+#include <linux/dma-buf.h>
 
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nouveau_dma.h"
+#include <drm/drmP.h>
 
-#include <linux/dma-buf.h>
+#include "nouveau_drm.h"
+#include "nouveau_gem.h"
 
 static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
 					  enum dma_data_direction dir)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
deleted file mode 100644
index 0ebb62f1fc80..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-
-static u32
-nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_ramht *ramht = chan->ramht;
-	u32 hash = 0;
-	int i;
-
-	NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
-
-	for (i = 32; i > 0; i -= ramht->bits) {
-		hash ^= (handle & ((1 << ramht->bits) - 1));
-		handle >>= ramht->bits;
-	}
-
-	if (dev_priv->card_type < NV_50)
-		hash ^= chan->id << (ramht->bits - 4);
-	hash <<= 3;
-
-	NV_DEBUG(dev, "hash=0x%08x\n", hash);
-	return hash;
-}
-
-static int
-nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
-			  u32 offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 ctx = nv_ro32(ramht, offset + 4);
-
-	if (dev_priv->card_type < NV_40)
-		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
-	return (ctx != 0);
-}
-
-static int
-nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
-				 struct nouveau_gpuobj *ramht, u32 offset)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	u32 ctx = nv_ro32(ramht, offset + 4);
-
-	if (dev_priv->card_type >= NV_50)
-		return true;
-	else if (dev_priv->card_type >= NV_40)
-		return chan->id ==
-			((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
-	else
-		return chan->id ==
-			((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
-}
-
-int
-nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
-		     struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	struct nouveau_ramht_entry *entry;
-	struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
-	unsigned long flags;
-	u32 ctx, co, ho;
-
-	if (nouveau_ramht_find(chan, handle))
-		return -EEXIST;
-
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-	entry->channel = chan;
-	entry->gpuobj = NULL;
-	entry->handle = handle;
-	nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
-
-	if (dev_priv->card_type < NV_40) {
-		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
-		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
-		      (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
-	} else
-	if (dev_priv->card_type < NV_50) {
-		ctx = (gpuobj->pinst >> 4) |
-		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
-		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
-	} else {
-		if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
-			ctx = (gpuobj->cinst << 10) |
-			      (chan->id << 28) |
-			      chan->id; /* HASH_TAG */
-		} else {
-			ctx = (gpuobj->cinst >> 4) |
-			      ((gpuobj->engine <<
-				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
-		}
-	}
-
-	spin_lock_irqsave(&chan->ramht->lock, flags);
-	list_add(&entry->head, &chan->ramht->entries);
-
-	co = ho = nouveau_ramht_hash_handle(chan, handle);
-	do {
-		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
-			NV_DEBUG(dev,
-				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
-				 chan->id, co, handle, ctx);
-			nv_wo32(ramht, co + 0, handle);
-			nv_wo32(ramht, co + 4, ctx);
-
-			spin_unlock_irqrestore(&chan->ramht->lock, flags);
-			instmem->flush(dev);
-			return 0;
-		}
-		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
-			 chan->id, co, nv_ro32(ramht, co));
-
-		co += 8;
-		if (co >= ramht->size)
-			co = 0;
-	} while (co != ho);
-
-	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
-	list_del(&entry->head);
-	spin_unlock_irqrestore(&chan->ramht->lock, flags);
-	kfree(entry);
-	return -ENOMEM;
-}
-
-static struct nouveau_ramht_entry *
-nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
-{
-	struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
-	struct nouveau_ramht_entry *entry;
-	unsigned long flags;
-
-	if (!ramht)
-		return NULL;
-
-	spin_lock_irqsave(&ramht->lock, flags);
-	list_for_each_entry(entry, &ramht->entries, head) {
-		if (entry->channel == chan &&
-		    (!handle || entry->handle == handle)) {
-			list_del(&entry->head);
-			spin_unlock_irqrestore(&ramht->lock, flags);
-
-			return entry;
-		}
-	}
-	spin_unlock_irqrestore(&ramht->lock, flags);
-
-	return NULL;
-}
-
-static void
-nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
-	unsigned long flags;
-	u32 co, ho;
-
-	spin_lock_irqsave(&chan->ramht->lock, flags);
-	co = ho = nouveau_ramht_hash_handle(chan, handle);
-	do {
-		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
-		    nouveau_ramht_entry_same_channel(chan, ramht, co) &&
-		    (handle == nv_ro32(ramht, co))) {
-			NV_DEBUG(dev,
-				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
-				 chan->id, co, handle, nv_ro32(ramht, co + 4));
-			nv_wo32(ramht, co + 0, 0x00000000);
-			nv_wo32(ramht, co + 4, 0x00000000);
-			instmem->flush(dev);
-			goto out;
-		}
-
-		co += 8;
-		if (co >= ramht->size)
-			co = 0;
-	} while (co != ho);
-
-	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
-		 chan->id, handle);
-out:
-	spin_unlock_irqrestore(&chan->ramht->lock, flags);
-}
-
-int
-nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
-{
-	struct nouveau_ramht_entry *entry;
-
-	entry = nouveau_ramht_remove_entry(chan, handle);
-	if (!entry)
-		return -ENOENT;
-
-	nouveau_ramht_remove_hash(chan, entry->handle);
-	nouveau_gpuobj_ref(NULL, &entry->gpuobj);
-	kfree(entry);
-	return 0;
-}
-
-struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
-{
-	struct nouveau_ramht *ramht = chan->ramht;
-	struct nouveau_ramht_entry *entry;
-	struct nouveau_gpuobj *gpuobj = NULL;
-	unsigned long flags;
-
-	if (unlikely(!chan->ramht))
-		return NULL;
-
-	spin_lock_irqsave(&ramht->lock, flags);
-	list_for_each_entry(entry, &chan->ramht->entries, head) {
-		if (entry->channel == chan && entry->handle == handle) {
-			gpuobj = entry->gpuobj;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&ramht->lock, flags);
-
-	return gpuobj;
-}
-
-int
-nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		  struct nouveau_ramht **pramht)
-{
-	struct nouveau_ramht *ramht;
-
-	ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
-	if (!ramht)
-		return -ENOMEM;
-
-	ramht->dev = dev;
-	kref_init(&ramht->refcount);
-	ramht->bits = drm_order(gpuobj->size / 8);
-	INIT_LIST_HEAD(&ramht->entries);
-	spin_lock_init(&ramht->lock);
-	nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
-
-	*pramht = ramht;
-	return 0;
-}
-
-static void
-nouveau_ramht_del(struct kref *ref)
-{
-	struct nouveau_ramht *ramht =
-		container_of(ref, struct nouveau_ramht, refcount);
-
-	nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
-	kfree(ramht);
-}
-
-void
-nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
-		  struct nouveau_channel *chan)
-{
-	struct nouveau_ramht_entry *entry;
-	struct nouveau_ramht *ramht;
-
-	if (ref)
-		kref_get(&ref->refcount);
-
-	ramht = *ptr;
-	if (ramht) {
-		while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
-			nouveau_ramht_remove_hash(chan, entry->handle);
-			nouveau_gpuobj_ref(NULL, &entry->gpuobj);
-			kfree(entry);
-		}
-
-		kref_put(&ramht->refcount, nouveau_ramht_del);
-	}
-	*ptr = ref;
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 9d76a82d3c90..ca5492ac2da5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,11 +1,10 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 
-#define NV_CTXDMA_PAGE_SHIFT 12
-#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
-#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
+#include <subdev/fb.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_ttm.h"
 
 struct nouveau_sgdma_be {
 	/* this has to be the first field so populate/unpopulated in
@@ -13,7 +12,7 @@ struct nouveau_sgdma_be {
 	 */
 	struct ttm_dma_tt ttm;
 	struct drm_device *dev;
-	u64 offset;
+	struct nouveau_mem *node;
 };
 
 static void
@@ -22,7 +21,6 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 
 	if (ttm) {
-		NV_DEBUG(nvbe->dev, "\n");
 		ttm_dma_tt_fini(&nvbe->ttm);
 		kfree(nvbe);
 	}
@@ -32,25 +30,18 @@ static int
 nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	unsigned i, j, pte;
-
-	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
-
-	nvbe->offset = mem->start << PAGE_SHIFT;
-	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < ttm->num_pages; i++) {
-		dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
-		uint32_t offset_l = lower_32_bits(dma_offset);
+	struct nouveau_mem *node = mem->mm_node;
+	u64 size = mem->num_pages << 12;
 
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
-			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-			offset_l += NV_CTXDMA_PAGE_SIZE;
-		}
+	if (ttm->sg) {
+		node->sg = ttm->sg;
+		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+	} else {
+		node->pages = nvbe->ttm.dma_address;
+		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
 	}
 
+	nvbe->node = node;
 	return 0;
 }
 
@@ -58,22 +49,7 @@ static int
 nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	unsigned i, j, pte;
-
-	NV_DEBUG(dev, "\n");
-
-	if (ttm->state != tt_bound)
-		return 0;
-
-	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-	for (i = 0; i < ttm->num_pages; i++) {
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
-			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-	}
-
+	nouveau_vm_unmap(&nvbe->node->vma[0]);
 	return 0;
 }
 
@@ -83,206 +59,6 @@ static struct ttm_backend_func nv04_sgdma_backend = {
 	.destroy		= nouveau_sgdma_destroy
 };
 
-static void
-nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
-{
-	struct drm_device *dev = nvbe->dev;
-
-	nv_wr32(dev, 0x100810, 0x00000022);
-	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
-		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
-			 nv_rd32(dev, 0x100810));
-	nv_wr32(dev, 0x100810, 0x00000000);
-}
-
-static int
-nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->ttm.dma_address;
-	u32 pte = mem->start << 2;
-	u32 cnt = ttm->num_pages;
-
-	nvbe->offset = mem->start << PAGE_SHIFT;
-
-	while (cnt--) {
-		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
-		pte += 4;
-	}
-
-	nv41_sgdma_flush(nvbe);
-	return 0;
-}
-
-static int
-nv41_sgdma_unbind(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = ttm->num_pages;
-
-	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
-		pte += 4;
-	}
-
-	nv41_sgdma_flush(nvbe);
-	return 0;
-}
-
-static struct ttm_backend_func nv41_sgdma_backend = {
-	.bind			= nv41_sgdma_bind,
-	.unbind			= nv41_sgdma_unbind,
-	.destroy		= nouveau_sgdma_destroy
-};
-
-static void
-nv44_sgdma_flush(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-
-	nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
-	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
-	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
-		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
-			 nv_rd32(dev, 0x100808));
-	nv_wr32(dev, 0x100808, 0x00000000);
-}
-
-static void
-nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
-{
-	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
-	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
-	u32 pte, tmp[4];
-
-	pte   = base >> 2;
-	base &= ~0x0000000f;
-
-	tmp[0] = nv_ro32(pgt, base + 0x0);
-	tmp[1] = nv_ro32(pgt, base + 0x4);
-	tmp[2] = nv_ro32(pgt, base + 0x8);
-	tmp[3] = nv_ro32(pgt, base + 0xc);
-	while (cnt--) {
-		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
-		switch (pte++ & 0x3) {
-		case 0:
-			tmp[0] &= ~0x07ffffff;
-			tmp[0] |= addr;
-			break;
-		case 1:
-			tmp[0] &= ~0xf8000000;
-			tmp[0] |= addr << 27;
-			tmp[1] &= ~0x003fffff;
-			tmp[1] |= addr >> 5;
-			break;
-		case 2:
-			tmp[1] &= ~0xffc00000;
-			tmp[1] |= addr << 22;
-			tmp[2] &= ~0x0001ffff;
-			tmp[2] |= addr >> 10;
-			break;
-		case 3:
-			tmp[2] &= ~0xfffe0000;
-			tmp[2] |= addr << 17;
-			tmp[3] &= ~0x00000fff;
-			tmp[3] |= addr >> 15;
-			break;
-		}
-	}
-
-	tmp[3] |= 0x40000000;
-
-	nv_wo32(pgt, base + 0x0, tmp[0]);
-	nv_wo32(pgt, base + 0x4, tmp[1]);
-	nv_wo32(pgt, base + 0x8, tmp[2]);
-	nv_wo32(pgt, base + 0xc, tmp[3]);
-}
-
-static int
-nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	dma_addr_t *list = nvbe->ttm.dma_address;
-	u32 pte = mem->start << 2, tmp[4];
-	u32 cnt = ttm->num_pages;
-	int i;
-
-	nvbe->offset = mem->start << PAGE_SHIFT;
-
-	if (pte & 0x0000000c) {
-		u32  max = 4 - ((pte >> 2) & 0x3);
-		u32 part = (cnt > max) ? max : cnt;
-		nv44_sgdma_fill(pgt, list, pte, part);
-		pte  += (part << 2);
-		list += part;
-		cnt  -= part;
-	}
-
-	while (cnt >= 4) {
-		for (i = 0; i < 4; i++)
-			tmp[i] = *list++ >> 12;
-		nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
-		nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
-		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
-		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
-		pte  += 0x10;
-		cnt  -= 4;
-	}
-
-	if (cnt)
-		nv44_sgdma_fill(pgt, list, pte, cnt);
-
-	nv44_sgdma_flush(ttm);
-	return 0;
-}
-
-static int
-nv44_sgdma_unbind(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
-	u32 pte = (nvbe->offset >> 12) << 2;
-	u32 cnt = ttm->num_pages;
-
-	if (pte & 0x0000000c) {
-		u32  max = 4 - ((pte >> 2) & 0x3);
-		u32 part = (cnt > max) ? max : cnt;
-		nv44_sgdma_fill(pgt, NULL, pte, part);
-		pte  += (part << 2);
-		cnt  -= part;
-	}
-
-	while (cnt >= 4) {
-		nv_wo32(pgt, pte + 0x0, 0x00000000);
-		nv_wo32(pgt, pte + 0x4, 0x00000000);
-		nv_wo32(pgt, pte + 0x8, 0x00000000);
-		nv_wo32(pgt, pte + 0xc, 0x00000000);
-		pte  += 0x10;
-		cnt  -= 4;
-	}
-
-	if (cnt)
-		nv44_sgdma_fill(pgt, NULL, pte, cnt);
-
-	nv44_sgdma_flush(ttm);
-	return 0;
-}
-
-static struct ttm_backend_func nv44_sgdma_backend = {
-	.bind			= nv44_sgdma_bind,
-	.unbind			= nv44_sgdma_unbind,
-	.destroy		= nouveau_sgdma_destroy
-};
-
 static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
@@ -315,16 +91,18 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
 			 unsigned long size, uint32_t page_flags,
 			 struct page *dummy_read_page)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nouveau_sgdma_be *nvbe;
 
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
-	nvbe->dev = dev;
-	nvbe->ttm.ttm.func = dev_priv->gart_info.func;
+	nvbe->dev = drm->dev;
+	if (nv_device(drm->device)->card_type < NV_50)
+		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
+	else
+		nvbe->ttm.ttm.func = &nv50_sgdma_backend;
 
 	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
 		kfree(nvbe);
@@ -332,116 +110,3 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
 	}
 	return &nvbe->ttm.ttm;
 }
-
-int
-nouveau_sgdma_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = NULL;
-	u32 aper_size, align;
-	int ret;
-
-	if (dev_priv->card_type >= NV_40)
-		aper_size = 512 * 1024 * 1024;
-	else
-		aper_size = 128 * 1024 * 1024;
-
-	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
-	 * christmas.  The cards before it have them, the cards after
-	 * it have them, why is NV44 so unloved?
-	 */
-	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
-	if (!dev_priv->gart_info.dummy.page)
-		return -ENOMEM;
-
-	dev_priv->gart_info.dummy.addr =
-		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
-			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
-		NV_ERROR(dev, "error mapping dummy page\n");
-		__free_page(dev_priv->gart_info.dummy.page);
-		dev_priv->gart_info.dummy.page = NULL;
-		return -ENOMEM;
-	}
-
-	if (dev_priv->card_type >= NV_50) {
-		dev_priv->gart_info.aper_base = 0;
-		dev_priv->gart_info.aper_size = aper_size;
-		dev_priv->gart_info.type = NOUVEAU_GART_HW;
-		dev_priv->gart_info.func = &nv50_sgdma_backend;
-	} else
-	if (0 && pci_is_pcie(dev->pdev) &&
-	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
-		if (nv44_graph_class(dev)) {
-			dev_priv->gart_info.func = &nv44_sgdma_backend;
-			align = 512 * 1024;
-		} else {
-			dev_priv->gart_info.func = &nv41_sgdma_backend;
-			align = 16;
-		}
-
-		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
-					 NVOBJ_FLAG_ZERO_ALLOC |
-					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-		if (ret) {
-			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-			return ret;
-		}
-
-		dev_priv->gart_info.sg_ctxdma = gpuobj;
-		dev_priv->gart_info.aper_base = 0;
-		dev_priv->gart_info.aper_size = aper_size;
-		dev_priv->gart_info.type = NOUVEAU_GART_HW;
-	} else {
-		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
-					 NVOBJ_FLAG_ZERO_ALLOC |
-					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-		if (ret) {
-			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-			return ret;
-		}
-
-		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
-				   (1 << 12) /* PT present */ |
-				   (0 << 13) /* PT *not* linear */ |
-				   (0 << 14) /* RW */ |
-				   (2 << 16) /* PCI */);
-		nv_wo32(gpuobj, 4, aper_size - 1);
-
-		dev_priv->gart_info.sg_ctxdma = gpuobj;
-		dev_priv->gart_info.aper_base = 0;
-		dev_priv->gart_info.aper_size = aper_size;
-		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
-		dev_priv->gart_info.func = &nv04_sgdma_backend;
-	}
-
-	return 0;
-}
-
-void
-nouveau_sgdma_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
-
-	if (dev_priv->gart_info.dummy.page) {
-		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
-			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		__free_page(dev_priv->gart_info.dummy.page);
-		dev_priv->gart_info.dummy.page = NULL;
-	}
-}
-
-uint32_t
-nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-
-	BUG_ON(dev_priv->card_type >= NV_50);
-
-	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
-		(offset & NV_CTXDMA_PAGE_MASK);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
deleted file mode 100644
index 709e5ac680ec..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_software.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __NOUVEAU_SOFTWARE_H__
-#define __NOUVEAU_SOFTWARE_H__
-
-struct nouveau_software_priv {
-	struct nouveau_exec_engine base;
-	struct list_head vblank;
-	spinlock_t peephole_lock;
-};
-
-struct nouveau_software_chan {
-	struct list_head flip;
-	struct {
-		struct list_head list;
-		u32 channel;
-		u32 ctxdma;
-		u32 offset;
-		u32 value;
-		u32 head;
-	} vblank;
-};
-
-static inline void
-nouveau_software_context_new(struct nouveau_software_chan *pch)
-{
-	INIT_LIST_HEAD(&pch->flip);
-	INIT_LIST_HEAD(&pch->vblank.list);
-}
-
-static inline void
-nouveau_software_create(struct nouveau_software_priv *psw)
-{
-	INIT_LIST_HEAD(&psw->vblank);
-	spin_lock_init(&psw->peephole_lock);
-}
-
-static inline u16
-nouveau_software_class(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	if (dev_priv->card_type <= NV_04)
-		return 0x006e;
-	if (dev_priv->card_type <= NV_40)
-		return 0x016e;
-	if (dev_priv->card_type <= NV_50)
-		return 0x506e;
-	if (dev_priv->card_type <= NV_E0)
-		return 0x906e;
-	return 0x0000;
-}
-
-int nv04_software_create(struct drm_device *);
-int nv50_software_create(struct drm_device *);
-int nvc0_software_create(struct drm_device *);
-u64 nvc0_software_crtc(struct nouveau_channel *, int crtc);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
deleted file mode 100644
index 30fe9291d17e..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ /dev/null
@@ -1,1304 +0,0 @@
-/*
- * Copyright 2005 Stephane Marchesin
- * Copyright 2008 Stuart Bennett
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/swab.h>
-#include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <linux/vgaarb.h>
-#include <linux/vga_switcheroo.h>
-
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nouveau_fbcon.h"
-#include "nouveau_ramht.h"
-#include "nouveau_gpio.h"
-#include "nouveau_pm.h"
-#include "nv50_display.h"
-#include "nouveau_fifo.h"
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
-
-static void nouveau_stub_takedown(struct drm_device *dev) {}
-static int nouveau_stub_init(struct drm_device *dev) { return 0; }
-
-static int nouveau_init_engine_ptrs(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-
-	switch (dev_priv->chipset & 0xf0) {
-	case 0x00:
-		engine->instmem.init		= nv04_instmem_init;
-		engine->instmem.takedown	= nv04_instmem_takedown;
-		engine->instmem.suspend		= nv04_instmem_suspend;
-		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.get		= nv04_instmem_get;
-		engine->instmem.put		= nv04_instmem_put;
-		engine->instmem.map		= nv04_instmem_map;
-		engine->instmem.unmap		= nv04_instmem_unmap;
-		engine->instmem.flush		= nv04_instmem_flush;
-		engine->mc.init			= nv04_mc_init;
-		engine->mc.takedown		= nv04_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nv04_fb_init;
-		engine->fb.takedown		= nv04_fb_takedown;
-		engine->display.early_init	= nv04_display_early_init;
-		engine->display.late_takedown	= nv04_display_late_takedown;
-		engine->display.create		= nv04_display_create;
-		engine->display.destroy		= nv04_display_destroy;
-		engine->display.init		= nv04_display_init;
-		engine->display.fini		= nv04_display_fini;
-		engine->pm.clocks_get		= nv04_pm_clocks_get;
-		engine->pm.clocks_pre		= nv04_pm_clocks_pre;
-		engine->pm.clocks_set		= nv04_pm_clocks_set;
-		engine->vram.init		= nv04_fb_vram_init;
-		engine->vram.takedown		= nouveau_stub_takedown;
-		engine->vram.flags_valid	= nouveau_mem_flags_valid;
-		break;
-	case 0x10:
-		engine->instmem.init		= nv04_instmem_init;
-		engine->instmem.takedown	= nv04_instmem_takedown;
-		engine->instmem.suspend		= nv04_instmem_suspend;
-		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.get		= nv04_instmem_get;
-		engine->instmem.put		= nv04_instmem_put;
-		engine->instmem.map		= nv04_instmem_map;
-		engine->instmem.unmap		= nv04_instmem_unmap;
-		engine->instmem.flush		= nv04_instmem_flush;
-		engine->mc.init			= nv04_mc_init;
-		engine->mc.takedown		= nv04_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nv10_fb_init;
-		engine->fb.takedown		= nv10_fb_takedown;
-		engine->fb.init_tile_region	= nv10_fb_init_tile_region;
-		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
-		engine->fb.free_tile_region	= nv10_fb_free_tile_region;
-		engine->display.early_init	= nv04_display_early_init;
-		engine->display.late_takedown	= nv04_display_late_takedown;
-		engine->display.create		= nv04_display_create;
-		engine->display.destroy		= nv04_display_destroy;
-		engine->display.init		= nv04_display_init;
-		engine->display.fini		= nv04_display_fini;
-		engine->gpio.drive		= nv10_gpio_drive;
-		engine->gpio.sense		= nv10_gpio_sense;
-		engine->pm.clocks_get		= nv04_pm_clocks_get;
-		engine->pm.clocks_pre		= nv04_pm_clocks_pre;
-		engine->pm.clocks_set		= nv04_pm_clocks_set;
-		if (dev_priv->chipset == 0x1a ||
-		    dev_priv->chipset == 0x1f)
-			engine->vram.init	= nv1a_fb_vram_init;
-		else
-			engine->vram.init	= nv10_fb_vram_init;
-		engine->vram.takedown		= nouveau_stub_takedown;
-		engine->vram.flags_valid	= nouveau_mem_flags_valid;
-		break;
-	case 0x20:
-		engine->instmem.init		= nv04_instmem_init;
-		engine->instmem.takedown	= nv04_instmem_takedown;
-		engine->instmem.suspend		= nv04_instmem_suspend;
-		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.get		= nv04_instmem_get;
-		engine->instmem.put		= nv04_instmem_put;
-		engine->instmem.map		= nv04_instmem_map;
-		engine->instmem.unmap		= nv04_instmem_unmap;
-		engine->instmem.flush		= nv04_instmem_flush;
-		engine->mc.init			= nv04_mc_init;
-		engine->mc.takedown		= nv04_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nv20_fb_init;
-		engine->fb.takedown		= nv20_fb_takedown;
-		engine->fb.init_tile_region	= nv20_fb_init_tile_region;
-		engine->fb.set_tile_region	= nv20_fb_set_tile_region;
-		engine->fb.free_tile_region	= nv20_fb_free_tile_region;
-		engine->display.early_init	= nv04_display_early_init;
-		engine->display.late_takedown	= nv04_display_late_takedown;
-		engine->display.create		= nv04_display_create;
-		engine->display.destroy		= nv04_display_destroy;
-		engine->display.init		= nv04_display_init;
-		engine->display.fini		= nv04_display_fini;
-		engine->gpio.drive		= nv10_gpio_drive;
-		engine->gpio.sense		= nv10_gpio_sense;
-		engine->pm.clocks_get		= nv04_pm_clocks_get;
-		engine->pm.clocks_pre		= nv04_pm_clocks_pre;
-		engine->pm.clocks_set		= nv04_pm_clocks_set;
-		engine->vram.init		= nv20_fb_vram_init;
-		engine->vram.takedown		= nouveau_stub_takedown;
-		engine->vram.flags_valid	= nouveau_mem_flags_valid;
-		break;
-	case 0x30:
-		engine->instmem.init		= nv04_instmem_init;
-		engine->instmem.takedown	= nv04_instmem_takedown;
-		engine->instmem.suspend		= nv04_instmem_suspend;
-		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.get		= nv04_instmem_get;
-		engine->instmem.put		= nv04_instmem_put;
-		engine->instmem.map		= nv04_instmem_map;
-		engine->instmem.unmap		= nv04_instmem_unmap;
-		engine->instmem.flush		= nv04_instmem_flush;
-		engine->mc.init			= nv04_mc_init;
-		engine->mc.takedown		= nv04_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nv30_fb_init;
-		engine->fb.takedown		= nv30_fb_takedown;
-		engine->fb.init_tile_region	= nv30_fb_init_tile_region;
-		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
-		engine->fb.free_tile_region	= nv30_fb_free_tile_region;
-		engine->display.early_init	= nv04_display_early_init;
-		engine->display.late_takedown	= nv04_display_late_takedown;
-		engine->display.create		= nv04_display_create;
-		engine->display.destroy		= nv04_display_destroy;
-		engine->display.init		= nv04_display_init;
-		engine->display.fini		= nv04_display_fini;
-		engine->gpio.drive		= nv10_gpio_drive;
-		engine->gpio.sense		= nv10_gpio_sense;
-		engine->pm.clocks_get		= nv04_pm_clocks_get;
-		engine->pm.clocks_pre		= nv04_pm_clocks_pre;
-		engine->pm.clocks_set		= nv04_pm_clocks_set;
-		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
-		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
-		engine->vram.init		= nv20_fb_vram_init;
-		engine->vram.takedown		= nouveau_stub_takedown;
-		engine->vram.flags_valid	= nouveau_mem_flags_valid;
-		break;
-	case 0x40:
-	case 0x60:
-		engine->instmem.init		= nv04_instmem_init;
-		engine->instmem.takedown	= nv04_instmem_takedown;
-		engine->instmem.suspend		= nv04_instmem_suspend;
-		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.get		= nv04_instmem_get;
-		engine->instmem.put		= nv04_instmem_put;
-		engine->instmem.map		= nv04_instmem_map;
-		engine->instmem.unmap		= nv04_instmem_unmap;
-		engine->instmem.flush		= nv04_instmem_flush;
-		engine->mc.init			= nv40_mc_init;
-		engine->mc.takedown		= nv40_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nv40_fb_init;
-		engine->fb.takedown		= nv40_fb_takedown;
-		engine->fb.init_tile_region	= nv30_fb_init_tile_region;
-		engine->fb.set_tile_region	= nv40_fb_set_tile_region;
-		engine->fb.free_tile_region	= nv30_fb_free_tile_region;
-		engine->display.early_init	= nv04_display_early_init;
-		engine->display.late_takedown	= nv04_display_late_takedown;
-		engine->display.create		= nv04_display_create;
-		engine->display.destroy		= nv04_display_destroy;
-		engine->display.init		= nv04_display_init;
-		engine->display.fini		= nv04_display_fini;
-		engine->gpio.init		= nv10_gpio_init;
-		engine->gpio.fini		= nv10_gpio_fini;
-		engine->gpio.drive		= nv10_gpio_drive;
-		engine->gpio.sense		= nv10_gpio_sense;
-		engine->gpio.irq_enable		= nv10_gpio_irq_enable;
-		engine->pm.clocks_get		= nv40_pm_clocks_get;
-		engine->pm.clocks_pre		= nv40_pm_clocks_pre;
-		engine->pm.clocks_set		= nv40_pm_clocks_set;
-		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
-		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
-		engine->pm.temp_get		= nv40_temp_get;
-		engine->pm.pwm_get		= nv40_pm_pwm_get;
-		engine->pm.pwm_set		= nv40_pm_pwm_set;
-		engine->vram.init		= nv40_fb_vram_init;
-		engine->vram.takedown		= nouveau_stub_takedown;
-		engine->vram.flags_valid	= nouveau_mem_flags_valid;
-		break;
-	case 0x50:
-	case 0x80: /* gotta love NVIDIA's consistency.. */
-	case 0x90:
-	case 0xa0:
-		engine->instmem.init		= nv50_instmem_init;
-		engine->instmem.takedown	= nv50_instmem_takedown;
-		engine->instmem.suspend		= nv50_instmem_suspend;
-		engine->instmem.resume		= nv50_instmem_resume;
-		engine->instmem.get		= nv50_instmem_get;
-		engine->instmem.put		= nv50_instmem_put;
-		engine->instmem.map		= nv50_instmem_map;
-		engine->instmem.unmap		= nv50_instmem_unmap;
-		if (dev_priv->chipset == 0x50)
-			engine->instmem.flush	= nv50_instmem_flush;
-		else
-			engine->instmem.flush	= nv84_instmem_flush;
-		engine->mc.init			= nv50_mc_init;
-		engine->mc.takedown		= nv50_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nv50_fb_init;
-		engine->fb.takedown		= nv50_fb_takedown;
-		engine->display.early_init	= nv50_display_early_init;
-		engine->display.late_takedown	= nv50_display_late_takedown;
-		engine->display.create		= nv50_display_create;
-		engine->display.destroy		= nv50_display_destroy;
-		engine->display.init		= nv50_display_init;
-		engine->display.fini		= nv50_display_fini;
-		engine->gpio.init		= nv50_gpio_init;
-		engine->gpio.fini		= nv50_gpio_fini;
-		engine->gpio.drive		= nv50_gpio_drive;
-		engine->gpio.sense		= nv50_gpio_sense;
-		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
-		switch (dev_priv->chipset) {
-		case 0x84:
-		case 0x86:
-		case 0x92:
-		case 0x94:
-		case 0x96:
-		case 0x98:
-		case 0xa0:
-		case 0xaa:
-		case 0xac:
-		case 0x50:
-			engine->pm.clocks_get	= nv50_pm_clocks_get;
-			engine->pm.clocks_pre	= nv50_pm_clocks_pre;
-			engine->pm.clocks_set	= nv50_pm_clocks_set;
-			break;
-		default:
-			engine->pm.clocks_get	= nva3_pm_clocks_get;
-			engine->pm.clocks_pre	= nva3_pm_clocks_pre;
-			engine->pm.clocks_set	= nva3_pm_clocks_set;
-			break;
-		}
-		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
-		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
-		if (dev_priv->chipset >= 0x84)
-			engine->pm.temp_get	= nv84_temp_get;
-		else
-			engine->pm.temp_get	= nv40_temp_get;
-		engine->pm.pwm_get		= nv50_pm_pwm_get;
-		engine->pm.pwm_set		= nv50_pm_pwm_set;
-		engine->vram.init		= nv50_vram_init;
-		engine->vram.takedown		= nv50_vram_fini;
-		engine->vram.get		= nv50_vram_new;
-		engine->vram.put		= nv50_vram_del;
-		engine->vram.flags_valid	= nv50_vram_flags_valid;
-		break;
-	case 0xc0:
-		engine->instmem.init		= nvc0_instmem_init;
-		engine->instmem.takedown	= nvc0_instmem_takedown;
-		engine->instmem.suspend		= nvc0_instmem_suspend;
-		engine->instmem.resume		= nvc0_instmem_resume;
-		engine->instmem.get		= nv50_instmem_get;
-		engine->instmem.put		= nv50_instmem_put;
-		engine->instmem.map		= nv50_instmem_map;
-		engine->instmem.unmap		= nv50_instmem_unmap;
-		engine->instmem.flush		= nv84_instmem_flush;
-		engine->mc.init			= nv50_mc_init;
-		engine->mc.takedown		= nv50_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nvc0_fb_init;
-		engine->fb.takedown		= nvc0_fb_takedown;
-		engine->display.early_init	= nv50_display_early_init;
-		engine->display.late_takedown	= nv50_display_late_takedown;
-		engine->display.create		= nv50_display_create;
-		engine->display.destroy		= nv50_display_destroy;
-		engine->display.init		= nv50_display_init;
-		engine->display.fini		= nv50_display_fini;
-		engine->gpio.init		= nv50_gpio_init;
-		engine->gpio.fini		= nv50_gpio_fini;
-		engine->gpio.drive		= nv50_gpio_drive;
-		engine->gpio.sense		= nv50_gpio_sense;
-		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
-		engine->vram.init		= nvc0_vram_init;
-		engine->vram.takedown		= nv50_vram_fini;
-		engine->vram.get		= nvc0_vram_new;
-		engine->vram.put		= nv50_vram_del;
-		engine->vram.flags_valid	= nvc0_vram_flags_valid;
-		engine->pm.temp_get		= nv84_temp_get;
-		engine->pm.clocks_get		= nvc0_pm_clocks_get;
-		engine->pm.clocks_pre		= nvc0_pm_clocks_pre;
-		engine->pm.clocks_set		= nvc0_pm_clocks_set;
-		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
-		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
-		engine->pm.pwm_get		= nv50_pm_pwm_get;
-		engine->pm.pwm_set		= nv50_pm_pwm_set;
-		break;
-	case 0xd0:
-		engine->instmem.init		= nvc0_instmem_init;
-		engine->instmem.takedown	= nvc0_instmem_takedown;
-		engine->instmem.suspend		= nvc0_instmem_suspend;
-		engine->instmem.resume		= nvc0_instmem_resume;
-		engine->instmem.get		= nv50_instmem_get;
-		engine->instmem.put		= nv50_instmem_put;
-		engine->instmem.map		= nv50_instmem_map;
-		engine->instmem.unmap		= nv50_instmem_unmap;
-		engine->instmem.flush		= nv84_instmem_flush;
-		engine->mc.init			= nv50_mc_init;
-		engine->mc.takedown		= nv50_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nvc0_fb_init;
-		engine->fb.takedown		= nvc0_fb_takedown;
-		engine->display.early_init	= nouveau_stub_init;
-		engine->display.late_takedown	= nouveau_stub_takedown;
-		engine->display.create		= nvd0_display_create;
-		engine->display.destroy		= nvd0_display_destroy;
-		engine->display.init		= nvd0_display_init;
-		engine->display.fini		= nvd0_display_fini;
-		engine->gpio.init		= nv50_gpio_init;
-		engine->gpio.fini		= nv50_gpio_fini;
-		engine->gpio.drive		= nvd0_gpio_drive;
-		engine->gpio.sense		= nvd0_gpio_sense;
-		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
-		engine->vram.init		= nvc0_vram_init;
-		engine->vram.takedown		= nv50_vram_fini;
-		engine->vram.get		= nvc0_vram_new;
-		engine->vram.put		= nv50_vram_del;
-		engine->vram.flags_valid	= nvc0_vram_flags_valid;
-		engine->pm.temp_get		= nv84_temp_get;
-		engine->pm.clocks_get		= nvc0_pm_clocks_get;
-		engine->pm.clocks_pre		= nvc0_pm_clocks_pre;
-		engine->pm.clocks_set		= nvc0_pm_clocks_set;
-		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
-		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
-		break;
-	case 0xe0:
-		engine->instmem.init		= nvc0_instmem_init;
-		engine->instmem.takedown	= nvc0_instmem_takedown;
-		engine->instmem.suspend		= nvc0_instmem_suspend;
-		engine->instmem.resume		= nvc0_instmem_resume;
-		engine->instmem.get		= nv50_instmem_get;
-		engine->instmem.put		= nv50_instmem_put;
-		engine->instmem.map		= nv50_instmem_map;
-		engine->instmem.unmap		= nv50_instmem_unmap;
-		engine->instmem.flush		= nv84_instmem_flush;
-		engine->mc.init			= nv50_mc_init;
-		engine->mc.takedown		= nv50_mc_takedown;
-		engine->timer.init		= nv04_timer_init;
-		engine->timer.read		= nv04_timer_read;
-		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nvc0_fb_init;
-		engine->fb.takedown		= nvc0_fb_takedown;
-		engine->display.early_init	= nouveau_stub_init;
-		engine->display.late_takedown	= nouveau_stub_takedown;
-		engine->display.create		= nvd0_display_create;
-		engine->display.destroy		= nvd0_display_destroy;
-		engine->display.init		= nvd0_display_init;
-		engine->display.fini		= nvd0_display_fini;
-		engine->gpio.init		= nv50_gpio_init;
-		engine->gpio.fini		= nv50_gpio_fini;
-		engine->gpio.drive		= nvd0_gpio_drive;
-		engine->gpio.sense		= nvd0_gpio_sense;
-		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
-		engine->vram.init		= nvc0_vram_init;
-		engine->vram.takedown		= nv50_vram_fini;
-		engine->vram.get		= nvc0_vram_new;
-		engine->vram.put		= nv50_vram_del;
-		engine->vram.flags_valid	= nvc0_vram_flags_valid;
-		break;
-	default:
-		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
-		return 1;
-	}
-
-	/* headless mode */
-	if (nouveau_modeset == 2) {
-		engine->display.early_init = nouveau_stub_init;
-		engine->display.late_takedown = nouveau_stub_takedown;
-		engine->display.create = nouveau_stub_init;
-		engine->display.init = nouveau_stub_init;
-		engine->display.destroy = nouveau_stub_takedown;
-	}
-
-	return 0;
-}
-
-static unsigned int
-nouveau_vga_set_decode(void *priv, bool state)
-{
-	struct drm_device *dev = priv;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->chipset >= 0x40)
-		nv_wr32(dev, 0x88054, state);
-	else
-		nv_wr32(dev, 0x1854, state);
-
-	if (state)
-		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
-		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-	else
-		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-}
-
-static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
-					 enum vga_switcheroo_state state)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
-	if (state == VGA_SWITCHEROO_ON) {
-		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
-		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		nouveau_pci_resume(pdev);
-		drm_kms_helper_poll_enable(dev);
-		dev->switch_power_state = DRM_SWITCH_POWER_ON;
-	} else {
-		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
-		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		drm_kms_helper_poll_disable(dev);
-		nouveau_switcheroo_optimus_dsm();
-		nouveau_pci_suspend(pdev, pmm);
-		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
-	}
-}
-
-static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	nouveau_fbcon_output_poll_changed(dev);
-}
-
-static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	bool can_switch;
-
-	spin_lock(&dev->count_lock);
-	can_switch = (dev->open_count == 0);
-	spin_unlock(&dev->count_lock);
-	return can_switch;
-}
-
-static void
-nouveau_card_channel_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->channel)
-		nouveau_channel_put_unlocked(&dev_priv->channel);
-}
-
-static int
-nouveau_card_channel_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	int ret;
-
-	ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
-	dev_priv->channel = chan;
-	if (ret)
-		return ret;
-	mutex_unlock(&dev_priv->channel->mutex);
-
-	nouveau_bo_move_init(chan);
-	return 0;
-}
-
-static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
-	.set_gpu_state = nouveau_switcheroo_set_state,
-	.reprobe = nouveau_switcheroo_reprobe,
-	.can_switch = nouveau_switcheroo_can_switch,
-};
-
-int
-nouveau_card_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine;
-	int ret, e = 0;
-
-	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
-	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
-
-	/* Initialise internal driver API hooks */
-	ret = nouveau_init_engine_ptrs(dev);
-	if (ret)
-		goto out;
-	engine = &dev_priv->engine;
-	spin_lock_init(&dev_priv->channels.lock);
-	spin_lock_init(&dev_priv->tile.lock);
-	spin_lock_init(&dev_priv->context_switch_lock);
-	spin_lock_init(&dev_priv->vm_lock);
-
-	/* Make the CRTCs and I2C buses accessible */
-	ret = engine->display.early_init(dev);
-	if (ret)
-		goto out;
-
-	/* Parse BIOS tables / Run init tables if card not POSTed */
-	ret = nouveau_bios_init(dev);
-	if (ret)
-		goto out_display_early;
-
-	/* work around an odd issue on nvc1 by disabling the device's
-	 * nosnoop capability.  hopefully this won't cause issues until a
-	 * better fix is found - assuming there is one...
-	 */
-	if (dev_priv->chipset == 0xc1) {
-		nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
-	}
-
-	/* PMC */
-	ret = engine->mc.init(dev);
-	if (ret)
-		goto out_bios;
-
-	/* PTIMER */
-	ret = engine->timer.init(dev);
-	if (ret)
-		goto out_mc;
-
-	/* PFB */
-	ret = engine->fb.init(dev);
-	if (ret)
-		goto out_timer;
-
-	ret = engine->vram.init(dev);
-	if (ret)
-		goto out_fb;
-
-	/* PGPIO */
-	ret = nouveau_gpio_create(dev);
-	if (ret)
-		goto out_vram;
-
-	ret = nouveau_gpuobj_init(dev);
-	if (ret)
-		goto out_gpio;
-
-	ret = engine->instmem.init(dev);
-	if (ret)
-		goto out_gpuobj;
-
-	ret = nouveau_mem_vram_init(dev);
-	if (ret)
-		goto out_instmem;
-
-	ret = nouveau_mem_gart_init(dev);
-	if (ret)
-		goto out_ttmvram;
-
-	if (!dev_priv->noaccel) {
-		switch (dev_priv->card_type) {
-		case NV_04:
-			nv04_fifo_create(dev);
-			break;
-		case NV_10:
-		case NV_20:
-		case NV_30:
-			if (dev_priv->chipset < 0x17)
-				nv10_fifo_create(dev);
-			else
-				nv17_fifo_create(dev);
-			break;
-		case NV_40:
-			nv40_fifo_create(dev);
-			break;
-		case NV_50:
-			if (dev_priv->chipset == 0x50)
-				nv50_fifo_create(dev);
-			else
-				nv84_fifo_create(dev);
-			break;
-		case NV_C0:
-		case NV_D0:
-			nvc0_fifo_create(dev);
-			break;
-		case NV_E0:
-			nve0_fifo_create(dev);
-			break;
-		default:
-			break;
-		}
-
-		switch (dev_priv->card_type) {
-		case NV_04:
-			nv04_fence_create(dev);
-			break;
-		case NV_10:
-		case NV_20:
-		case NV_30:
-		case NV_40:
-		case NV_50:
-			if (dev_priv->chipset < 0x84)
-				nv10_fence_create(dev);
-			else
-				nv84_fence_create(dev);
-			break;
-		case NV_C0:
-		case NV_D0:
-		case NV_E0:
-			nvc0_fence_create(dev);
-			break;
-		default:
-			break;
-		}
-
-		switch (dev_priv->card_type) {
-		case NV_04:
-		case NV_10:
-		case NV_20:
-		case NV_30:
-		case NV_40:
-			nv04_software_create(dev);
-			break;
-		case NV_50:
-			nv50_software_create(dev);
-			break;
-		case NV_C0:
-		case NV_D0:
-		case NV_E0:
-			nvc0_software_create(dev);
-			break;
-		default:
-			break;
-		}
-
-		switch (dev_priv->card_type) {
-		case NV_04:
-			nv04_graph_create(dev);
-			break;
-		case NV_10:
-			nv10_graph_create(dev);
-			break;
-		case NV_20:
-		case NV_30:
-			nv20_graph_create(dev);
-			break;
-		case NV_40:
-			nv40_graph_create(dev);
-			break;
-		case NV_50:
-			nv50_graph_create(dev);
-			break;
-		case NV_C0:
-		case NV_D0:
-			nvc0_graph_create(dev);
-			break;
-		case NV_E0:
-			nve0_graph_create(dev);
-			break;
-		default:
-			break;
-		}
-
-		switch (dev_priv->chipset) {
-		case 0x84:
-		case 0x86:
-		case 0x92:
-		case 0x94:
-		case 0x96:
-		case 0xa0:
-			nv84_crypt_create(dev);
-			break;
-		case 0x98:
-		case 0xaa:
-		case 0xac:
-			nv98_crypt_create(dev);
-			break;
-		}
-
-		switch (dev_priv->card_type) {
-		case NV_50:
-			switch (dev_priv->chipset) {
-			case 0xa3:
-			case 0xa5:
-			case 0xa8:
-				nva3_copy_create(dev);
-				break;
-			}
-			break;
-		case NV_C0:
-			if (!(nv_rd32(dev, 0x022500) & 0x00000200))
-				nvc0_copy_create(dev, 1);
-		case NV_D0:
-			if (!(nv_rd32(dev, 0x022500) & 0x00000100))
-				nvc0_copy_create(dev, 0);
-			break;
-		default:
-			break;
-		}
-
-		if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
-			nv84_bsp_create(dev);
-			nv84_vp_create(dev);
-			nv98_ppp_create(dev);
-		} else
-		if (dev_priv->chipset >= 0x84) {
-			nv50_mpeg_create(dev);
-			nv84_bsp_create(dev);
-			nv84_vp_create(dev);
-		} else
-		if (dev_priv->chipset >= 0x50) {
-			nv50_mpeg_create(dev);
-		} else
-		if (dev_priv->card_type == NV_40 ||
-		    dev_priv->chipset == 0x31 ||
-		    dev_priv->chipset == 0x34 ||
-		    dev_priv->chipset == 0x36) {
-			nv31_mpeg_create(dev);
-		}
-
-		for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
-			if (dev_priv->eng[e]) {
-				ret = dev_priv->eng[e]->init(dev, e);
-				if (ret)
-					goto out_engine;
-			}
-		}
-	}
-
-	ret = nouveau_irq_init(dev);
-	if (ret)
-		goto out_engine;
-
-	ret = nouveau_display_create(dev);
-	if (ret)
-		goto out_irq;
-
-	nouveau_backlight_init(dev);
-	nouveau_pm_init(dev);
-
-	if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
-		ret = nouveau_card_channel_init(dev);
-		if (ret)
-			goto out_pm;
-	}
-
-	if (dev->mode_config.num_crtc) {
-		ret = nouveau_display_init(dev);
-		if (ret)
-			goto out_chan;
-
-		nouveau_fbcon_init(dev);
-	}
-
-	return 0;
-
-out_chan:
-	nouveau_card_channel_fini(dev);
-out_pm:
-	nouveau_pm_fini(dev);
-	nouveau_backlight_exit(dev);
-	nouveau_display_destroy(dev);
-out_irq:
-	nouveau_irq_fini(dev);
-out_engine:
-	if (!dev_priv->noaccel) {
-		for (e = e - 1; e >= 0; e--) {
-			if (!dev_priv->eng[e])
-				continue;
-			dev_priv->eng[e]->fini(dev, e, false);
-			dev_priv->eng[e]->destroy(dev, e);
-		}
-	}
-	nouveau_mem_gart_fini(dev);
-out_ttmvram:
-	nouveau_mem_vram_fini(dev);
-out_instmem:
-	engine->instmem.takedown(dev);
-out_gpuobj:
-	nouveau_gpuobj_takedown(dev);
-out_gpio:
-	nouveau_gpio_destroy(dev);
-out_vram:
-	engine->vram.takedown(dev);
-out_fb:
-	engine->fb.takedown(dev);
-out_timer:
-	engine->timer.takedown(dev);
-out_mc:
-	engine->mc.takedown(dev);
-out_bios:
-	nouveau_bios_takedown(dev);
-out_display_early:
-	engine->display.late_takedown(dev);
-out:
-	vga_switcheroo_unregister_client(dev->pdev);
-	vga_client_register(dev->pdev, NULL, NULL, NULL);
-	return ret;
-}
-
-static void nouveau_card_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	int e;
-
-	if (dev->mode_config.num_crtc) {
-		nouveau_fbcon_fini(dev);
-		nouveau_display_fini(dev);
-	}
-
-	nouveau_card_channel_fini(dev);
-	nouveau_pm_fini(dev);
-	nouveau_backlight_exit(dev);
-	nouveau_display_destroy(dev);
-
-	if (!dev_priv->noaccel) {
-		for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
-			if (dev_priv->eng[e]) {
-				dev_priv->eng[e]->fini(dev, e, false);
-				dev_priv->eng[e]->destroy(dev, e);
-			}
-		}
-	}
-
-	if (dev_priv->vga_ram) {
-		nouveau_bo_unpin(dev_priv->vga_ram);
-		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
-	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
-	mutex_unlock(&dev->struct_mutex);
-	nouveau_mem_gart_fini(dev);
-	nouveau_mem_vram_fini(dev);
-
-	engine->instmem.takedown(dev);
-	nouveau_gpuobj_takedown(dev);
-
-	nouveau_gpio_destroy(dev);
-	engine->vram.takedown(dev);
-	engine->fb.takedown(dev);
-	engine->timer.takedown(dev);
-	engine->mc.takedown(dev);
-
-	nouveau_bios_takedown(dev);
-	engine->display.late_takedown(dev);
-
-	nouveau_irq_fini(dev);
-
-	vga_switcheroo_unregister_client(dev->pdev);
-	vga_client_register(dev->pdev, NULL, NULL, NULL);
-}
-
-int
-nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fpriv *fpriv;
-	int ret;
-
-	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
-	if (unlikely(!fpriv))
-		return -ENOMEM;
-
-	spin_lock_init(&fpriv->lock);
-	INIT_LIST_HEAD(&fpriv->channels);
-
-	if (dev_priv->card_type == NV_50) {
-		ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
-				     &fpriv->vm);
-		if (ret) {
-			kfree(fpriv);
-			return ret;
-		}
-	} else
-	if (dev_priv->card_type >= NV_C0) {
-		ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
-				     &fpriv->vm);
-		if (ret) {
-			kfree(fpriv);
-			return ret;
-		}
-	}
-
-	file_priv->driver_priv = fpriv;
-	return 0;
-}
-
-/* a client is exiting; release the resources that were allocated for its
- * file_priv */
-void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
-	nouveau_channel_cleanup(dev, file_priv);
-}
-
-void
-nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
-{
-	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
-	nouveau_vm_ref(NULL, &fpriv->vm, NULL);
-	kfree(fpriv);
-}
-
-/* first module load, setup the mmio/fb mapping */
-/* KMS: we need mmio at load time, not when the first drm client opens. */
-int nouveau_firstopen(struct drm_device *dev)
-{
-	return 0;
-}
-
-/* if we have an OF card, copy vbios to RAMIN */
-static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
-{
-#if defined(__powerpc__)
-	int size, i;
-	const uint32_t *bios;
-	struct device_node *dn = pci_device_to_OF_node(dev->pdev);
-	if (!dn) {
-		NV_INFO(dev, "Unable to get the OF node\n");
-		return;
-	}
-
-	bios = of_get_property(dn, "NVDA,BMP", &size);
-	if (bios) {
-		for (i = 0; i < size; i += 4)
-			nv_wi32(dev, i, bios[i/4]);
-		NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
-	} else {
-		NV_INFO(dev, "Unable to get the OF bios\n");
-	}
-#endif
-}
-
-static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
-{
-	struct pci_dev *pdev = dev->pdev;
-	struct apertures_struct *aper = alloc_apertures(3);
-	if (!aper)
-		return NULL;
-
-	aper->ranges[0].base = pci_resource_start(pdev, 1);
-	aper->ranges[0].size = pci_resource_len(pdev, 1);
-	aper->count = 1;
-
-	if (pci_resource_len(pdev, 2)) {
-		aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
-		aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
-		aper->count++;
-	}
-
-	if (pci_resource_len(pdev, 3)) {
-		aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
-		aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
-		aper->count++;
-	}
-
-	return aper;
-}
-
-static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	bool primary = false;
-	dev_priv->apertures = nouveau_get_apertures(dev);
-	if (!dev_priv->apertures)
-		return -ENOMEM;
-
-#ifdef CONFIG_X86
-	primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
-
-	remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
-	return 0;
-}
-
-int nouveau_load(struct drm_device *dev, unsigned long flags)
-{
-	struct drm_nouveau_private *dev_priv;
-	unsigned long long offset, length;
-	uint32_t reg0 = ~0, strap;
-	int ret;
-
-	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-	if (!dev_priv) {
-		ret = -ENOMEM;
-		goto err_out;
-	}
-	dev->dev_private = dev_priv;
-	dev_priv->dev = dev;
-
-	pci_set_master(dev->pdev);
-
-	dev_priv->flags = flags & NOUVEAU_FLAGS;
-
-	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
-		 dev->pci_vendor, dev->pci_device, dev->pdev->class);
-
-	/* first up, map the start of mmio and determine the chipset */
-	dev_priv->mmio = ioremap(pci_resource_start(dev->pdev, 0), PAGE_SIZE);
-	if (dev_priv->mmio) {
-#ifdef __BIG_ENDIAN
-		/* put the card into big-endian mode if it's not */
-		if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
-			nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
-		DRM_MEMORYBARRIER();
-#endif
-
-		/* determine chipset and derive architecture from it */
-		reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
-		if ((reg0 & 0x0f000000) > 0) {
-			dev_priv->chipset = (reg0 & 0xff00000) >> 20;
-			switch (dev_priv->chipset & 0xf0) {
-			case 0x10:
-			case 0x20:
-			case 0x30:
-				dev_priv->card_type = dev_priv->chipset & 0xf0;
-				break;
-			case 0x40:
-			case 0x60:
-				dev_priv->card_type = NV_40;
-				break;
-			case 0x50:
-			case 0x80:
-			case 0x90:
-			case 0xa0:
-				dev_priv->card_type = NV_50;
-				break;
-			case 0xc0:
-				dev_priv->card_type = NV_C0;
-				break;
-			case 0xd0:
-				dev_priv->card_type = NV_D0;
-				break;
-			case 0xe0:
-				dev_priv->card_type = NV_E0;
-				break;
-			default:
-				break;
-			}
-		} else
-		if ((reg0 & 0xff00fff0) == 0x20004000) {
-			if (reg0 & 0x00f00000)
-				dev_priv->chipset = 0x05;
-			else
-				dev_priv->chipset = 0x04;
-			dev_priv->card_type = NV_04;
-		}
-
-		iounmap(dev_priv->mmio);
-	}
-
-	if (!dev_priv->card_type) {
-		NV_ERROR(dev, "unsupported chipset 0x%08x\n", reg0);
-		ret = -EINVAL;
-		goto err_priv;
-	}
-
-	NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
-		     dev_priv->card_type, reg0);
-
-	/* map the mmio regs, limiting the amount to preserve vmap space */
-	offset = pci_resource_start(dev->pdev, 0);
-	length = pci_resource_len(dev->pdev, 0);
-	if (dev_priv->card_type < NV_E0)
-		length = min(length, (unsigned long long)0x00800000);
-
-	dev_priv->mmio = ioremap(offset, length);
-	if (!dev_priv->mmio) {
-		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
-			 "Please report your setup to " DRIVER_EMAIL "\n");
-		ret = -EINVAL;
-		goto err_priv;
-	}
-	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", offset);
-
-	/* determine frequency of timing crystal */
-	strap = nv_rd32(dev, 0x101000);
-	if ( dev_priv->chipset < 0x17 ||
-	    (dev_priv->chipset >= 0x20 && dev_priv->chipset <= 0x25))
-		strap &= 0x00000040;
-	else
-		strap &= 0x00400040;
-
-	switch (strap) {
-	case 0x00000000: dev_priv->crystal = 13500; break;
-	case 0x00000040: dev_priv->crystal = 14318; break;
-	case 0x00400000: dev_priv->crystal = 27000; break;
-	case 0x00400040: dev_priv->crystal = 25000; break;
-	}
-
-	NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
-
-	/* Determine whether we'll attempt acceleration or not; some
-	 * cards are disabled by default here because they are known to be
-	 * non-functional or have never been tested due to lack of hw.
-	 */
-	dev_priv->noaccel = !!nouveau_noaccel;
-	if (nouveau_noaccel == -1) {
-		switch (dev_priv->chipset) {
-		case 0xd9: /* known broken */
-		case 0xe4: /* needs binary driver firmware */
-		case 0xe7: /* needs binary driver firmware */
-			NV_INFO(dev, "acceleration disabled by default, pass "
-				     "noaccel=0 to force enable\n");
-			dev_priv->noaccel = true;
-			break;
-		default:
-			dev_priv->noaccel = false;
-			break;
-		}
-	}
-
-	ret = nouveau_remove_conflicting_drivers(dev);
-	if (ret)
-		goto err_mmio;
-
-	/* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
-	if (dev_priv->card_type >= NV_40) {
-		int ramin_bar = 2;
-		if (pci_resource_len(dev->pdev, ramin_bar) == 0)
-			ramin_bar = 3;
-
-		dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
-		dev_priv->ramin =
-			ioremap(pci_resource_start(dev->pdev, ramin_bar),
-				dev_priv->ramin_size);
-		if (!dev_priv->ramin) {
-			NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
-			ret = -ENOMEM;
-			goto err_mmio;
-		}
-	} else {
-		dev_priv->ramin_size = 1 * 1024 * 1024;
-		dev_priv->ramin = ioremap(offset + NV_RAMIN,
-					  dev_priv->ramin_size);
-		if (!dev_priv->ramin) {
-			NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
-			ret = -ENOMEM;
-			goto err_mmio;
-		}
-	}
-
-	nouveau_OF_copy_vbios_to_ramin(dev);
-
-	/* Special flags */
-	if (dev->pci_device == 0x01a0)
-		dev_priv->flags |= NV_NFORCE;
-	else if (dev->pci_device == 0x01f0)
-		dev_priv->flags |= NV_NFORCE2;
-
-	/* For kernel modesetting, init card now and bring up fbcon */
-	ret = nouveau_card_init(dev);
-	if (ret)
-		goto err_ramin;
-
-	return 0;
-
-err_ramin:
-	iounmap(dev_priv->ramin);
-err_mmio:
-	iounmap(dev_priv->mmio);
-err_priv:
-	kfree(dev_priv);
-	dev->dev_private = NULL;
-err_out:
-	return ret;
-}
-
-void nouveau_lastclose(struct drm_device *dev)
-{
-	vga_switcheroo_process_delayed_switch();
-}
-
-int nouveau_unload(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	nouveau_card_takedown(dev);
-
-	iounmap(dev_priv->mmio);
-	iounmap(dev_priv->ramin);
-
-	kfree(dev_priv);
-	dev->dev_private = NULL;
-	return 0;
-}
-
-/* Wait until (value(reg) & mask) == val, or until the timeout expires */
-bool
-nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
-		uint32_t reg, uint32_t mask, uint32_t val)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-	uint64_t start = ptimer->read(dev);
-
-	do {
-		if ((nv_rd32(dev, reg) & mask) == val)
-			return true;
-	} while (ptimer->read(dev) - start < timeout);
-
-	return false;
-}
-
-/* Wait until (value(reg) & mask) != val, or until the timeout expires */
-bool
-nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
-		uint32_t reg, uint32_t mask, uint32_t val)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-	uint64_t start = ptimer->read(dev);
-
-	do {
-		if ((nv_rd32(dev, reg) & mask) != val)
-			return true;
-	} while (ptimer->read(dev) - start < timeout);
-
-	return false;
-}
-
-/* Wait until cond(data) == true, or until the timeout expires */
-bool
-nouveau_wait_cb(struct drm_device *dev, u64 timeout,
-		bool (*cond)(void *), void *data)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-	u64 start = ptimer->read(dev);
-
-	do {
-		if (cond(data) == true)
-			return true;
-	} while (ptimer->read(dev) - start < timeout);
-
-	return false;
-}
-
-/* Waits for PGRAPH to go completely idle */
-bool nouveau_wait_for_idle(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t mask = ~0;
-
-	if (dev_priv->card_type == NV_40)
-		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
-
-	if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
-		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
-			 nv_rd32(dev, NV04_PGRAPH_STATUS));
-		return false;
-	}
-
-	return true;
-}
-
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
deleted file mode 100644
index 1ad411dcc57a..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Copyright 2010 PathScale inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Martin Peres
- */
-
-#include <linux/module.h>
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_pm.h"
-
-static void
-nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
-	struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
-	int i, headerlen, recordlen, entries;
-
-	if (!temp) {
-		NV_DEBUG(dev, "temperature table pointer invalid\n");
-		return;
-	}
-
-	/* Set the default sensor's constants */
-	sensor->offset_constant = 0;
-	sensor->offset_mult = 0;
-	sensor->offset_div = 1;
-	sensor->slope_mult = 1;
-	sensor->slope_div = 1;
-
-	/* Set the default temperature thresholds */
-	temps->critical = 110;
-	temps->down_clock = 100;
-	temps->fan_boost = 90;
-
-	/* Set the default range for the pwm fan */
-	pm->fan.min_duty = 30;
-	pm->fan.max_duty = 100;
-
-	/* Set the known default values to set up the temperature sensor */
-	if (dev_priv->card_type >= NV_40) {
-		switch (dev_priv->chipset) {
-		case 0x43:
-			sensor->offset_mult = 32060;
-			sensor->offset_div = 1000;
-			sensor->slope_mult = 792;
-			sensor->slope_div = 1000;
-			break;
-
-		case 0x44:
-		case 0x47:
-		case 0x4a:
-			sensor->offset_mult = 27839;
-			sensor->offset_div = 1000;
-			sensor->slope_mult = 780;
-			sensor->slope_div = 1000;
-			break;
-
-		case 0x46:
-			sensor->offset_mult = -24775;
-			sensor->offset_div = 100;
-			sensor->slope_mult = 467;
-			sensor->slope_div = 10000;
-			break;
-
-		case 0x49:
-			sensor->offset_mult = -25051;
-			sensor->offset_div = 100;
-			sensor->slope_mult = 458;
-			sensor->slope_div = 10000;
-			break;
-
-		case 0x4b:
-			sensor->offset_mult = -24088;
-			sensor->offset_div = 100;
-			sensor->slope_mult = 442;
-			sensor->slope_div = 10000;
-			break;
-
-		case 0x50:
-			sensor->offset_mult = -22749;
-			sensor->offset_div = 100;
-			sensor->slope_mult = 431;
-			sensor->slope_div = 10000;
-			break;
-
-		case 0x67:
-			sensor->offset_mult = -26149;
-			sensor->offset_div = 100;
-			sensor->slope_mult = 484;
-			sensor->slope_div = 10000;
-			break;
-		}
-	}
-
-	headerlen = temp[1];
-	recordlen = temp[2];
-	entries = temp[3];
-	temp = temp + headerlen;
-
-	/* Read the entries from the table */
-	for (i = 0; i < entries; i++) {
-		s16 value = ROM16(temp[1]);
-
-		switch (temp[0]) {
-		case 0x01:
-			if ((value & 0x8f) == 0)
-				sensor->offset_constant = (value >> 9) & 0x7f;
-			break;
-
-		case 0x04:
-			if ((value & 0xf00f) == 0xa000) /* core */
-				temps->critical = (value&0x0ff0) >> 4;
-			break;
-
-		case 0x07:
-			if ((value & 0xf00f) == 0xa000) /* core */
-				temps->down_clock = (value&0x0ff0) >> 4;
-			break;
-
-		case 0x08:
-			if ((value & 0xf00f) == 0xa000) /* core */
-				temps->fan_boost = (value&0x0ff0) >> 4;
-			break;
-
-		case 0x10:
-			sensor->offset_mult = value;
-			break;
-
-		case 0x11:
-			sensor->offset_div = value;
-			break;
-
-		case 0x12:
-			sensor->slope_mult = value;
-			break;
-
-		case 0x13:
-			sensor->slope_div = value;
-			break;
-		case 0x22:
-			pm->fan.min_duty = value & 0xff;
-			pm->fan.max_duty = (value & 0xff00) >> 8;
-			break;
-		case 0x26:
-			pm->fan.pwm_freq = value;
-			break;
-		}
-		temp += recordlen;
-	}
-
-	nouveau_temp_safety_checks(dev);
-
-	/* check the fan min/max settings */
-	if (pm->fan.min_duty < 10)
-		pm->fan.min_duty = 10;
-	if (pm->fan.max_duty > 100)
-		pm->fan.max_duty = 100;
-	if (pm->fan.max_duty < pm->fan.min_duty)
-		pm->fan.max_duty = pm->fan.min_duty;
-}
-
-static int
-nv40_sensor_setup(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
-	s32 offset = sensor->offset_mult / sensor->offset_div;
-	s32 sensor_calibration;
-
-	/* set up the sensors */
-	sensor_calibration = 120 - offset - sensor->offset_constant;
-	sensor_calibration = sensor_calibration * sensor->slope_div /
-				sensor->slope_mult;
-
-	if (dev_priv->chipset >= 0x46)
-		sensor_calibration |= 0x80000000;
-	else
-		sensor_calibration |= 0x10000000;
-
-	nv_wr32(dev, 0x0015b0, sensor_calibration);
-
-	/* Wait for the sensor to update */
-	msleep(5);
-
-	/* read */
-	return nv_rd32(dev, 0x0015b4) & 0x1fff;
-}
-
-int
-nv40_temp_get(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
-	int offset = sensor->offset_mult / sensor->offset_div;
-	int core_temp;
-
-	if (dev_priv->card_type >= NV_50) {
-		core_temp = nv_rd32(dev, 0x20008);
-	} else {
-		core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
-		/* Set up the sensor if the temperature is 0 */
-		if (core_temp == 0)
-			core_temp = nv40_sensor_setup(dev);
-	}
-
-	core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
-	core_temp = core_temp + offset + sensor->offset_constant;
-
-	return core_temp;
-}
-
-int
-nv84_temp_get(struct drm_device *dev)
-{
-	return nv_rd32(dev, 0x20400);
-}
-
-void
-nouveau_temp_safety_checks(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
-	struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
-
-	if (temps->critical > 120)
-		temps->critical = 120;
-	else if (temps->critical < 80)
-		temps->critical = 80;
-
-	if (temps->down_clock > 110)
-		temps->down_clock = 110;
-	else if (temps->down_clock < 60)
-		temps->down_clock = 60;
-
-	if (temps->fan_boost > 100)
-		temps->fan_boost = 100;
-	else if (temps->fan_boost < 40)
-		temps->fan_boost = 40;
-}
-
-static bool
-probe_monitoring_device(struct nouveau_i2c_chan *i2c,
-			struct i2c_board_info *info)
-{
-	struct i2c_client *client;
-
-	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
-
-	client = i2c_new_device(&i2c->adapter, info);
-	if (!client)
-		return false;
-
-	if (!client->driver || client->driver->detect(client, info)) {
-		i2c_unregister_device(client);
-		return false;
-	}
-
-	return true;
-}
-
-static void
-nouveau_temp_probe_i2c(struct drm_device *dev)
-{
-	struct i2c_board_info info[] = {
-		{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
-		{ I2C_BOARD_INFO("w83781d", 0x2d) },
-		{ I2C_BOARD_INFO("adt7473", 0x2e) },
-		{ I2C_BOARD_INFO("f75375", 0x2e) },
-		{ I2C_BOARD_INFO("lm99", 0x4c) },
-		{ }
-	};
-
-	nouveau_i2c_identify(dev, "monitoring device", info,
-			     probe_monitoring_device, NV_I2C_DEFAULT(0));
-}
-
-void
-nouveau_temp_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct bit_entry P;
-	u8 *temp = NULL;
-
-	if (bios->type == NVBIOS_BIT) {
-		if (bit_table(dev, 'P', &P))
-			return;
-
-		if (P.version == 1)
-			temp = ROMPTR(dev, P.data[12]);
-		else if (P.version == 2)
-			temp = ROMPTR(dev, P.data[16]);
-		else
-			NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
-
-		nouveau_temp_vbios_parse(dev, temp);
-	}
-
-	nouveau_temp_probe_i2c(dev);
-}
-
-void
-nouveau_temp_fini(struct drm_device *dev)
-{
-
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 48de8dd69583..9be9cb58e19b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,21 +24,253 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/instmem.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+	/* nothing to do */
+	return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+	/* nothing to do */
+	return 0;
+}
+
+static inline void
+nouveau_mem_node_cleanup(struct nouveau_mem *node)
+{
+	if (node->vma[0].node) {
+		nouveau_vm_unmap(&node->vma[0]);
+		nouveau_vm_put(&node->vma[0]);
+	}
+
+	if (node->vma[1].node) {
+		nouveau_vm_unmap(&node->vma[1]);
+		nouveau_vm_put(&node->vma[1]);
+	}
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem)
+{
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	nouveau_mem_node_cleanup(mem->mm_node);
+	pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem)
+{
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_mem *node;
+	u32 size_nc = 0;
+	int ret;
+
+	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+		size_nc = 1 << nvbo->page_shift;
+
+	ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
+			   mem->page_alignment << PAGE_SHIFT, size_nc,
+			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
+	if (ret) {
+		mem->mm_node = NULL;
+		return (ret == -ENOSPC) ? 0 : ret;
+	}
+
+	node->page_shift = nvbo->page_shift;
+
+	mem->mm_node = node;
+	mem->start   = node->offset >> PAGE_SHIFT;
+	return 0;
+}
+
+static void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	u32 total = 0, free = 0;
+
+	mutex_lock(&mm->mutex);
+	list_for_each_entry(r, &mm->nodes, nl_entry) {
+		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+		       prefix, r->type, ((u64)r->offset << 12),
+		       (((u64)r->offset + r->length) << 12));
+
+		total += r->length;
+		if (!r->type)
+			free += r->length;
+	}
+	mutex_unlock(&mm->mutex);
+
+	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
+	       prefix, (u64)total << 12, (u64)free << 12);
+	printk(KERN_DEBUG "%s  block: 0x%08x\n",
+	       prefix, mm->block_size << 12);
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+	nouveau_vram_manager_init,
+	nouveau_vram_manager_fini,
+	nouveau_vram_manager_new,
+	nouveau_vram_manager_del,
+	nouveau_vram_manager_debug
+};
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+	return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+	return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem)
+{
+	nouveau_mem_node_cleanup(mem->mm_node);
+	kfree(mem->mm_node);
+	mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem)
+{
+	struct nouveau_mem *node;
+
+	if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
+		return -ENOMEM;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+	node->page_shift = 12;
+
+	mem->mm_node = node;
+	mem->start   = 0;
+	return 0;
+}
+
+static void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+	nouveau_gart_manager_init,
+	nouveau_gart_manager_fini,
+	nouveau_gart_manager_new,
+	nouveau_gart_manager_del,
+	nouveau_gart_manager_debug
+};
+
+#include <core/subdev/vm/nv04.h>
+static int
+nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
+	struct nv04_vmmgr_priv *priv = (void *)vmm;
+	struct nouveau_vm *vm = NULL;
+	nouveau_vm_ref(priv->vm, &vm, NULL);
+	man->priv = vm;
+	return 0;
+}
+
+static int
+nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+	struct nouveau_vm *vm = man->priv;
+	nouveau_vm_ref(NULL, &vm, NULL);
+	man->priv = NULL;
+	return 0;
+}
+
+static void
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+{
+	struct nouveau_mem *node = mem->mm_node;
+	if (node->vma[0].node)
+		nouveau_vm_put(&node->vma[0]);
+	kfree(mem->mm_node);
+	mem->mm_node = NULL;
+}
+
+static int
+nv04_gart_manager_new(struct ttm_mem_type_manager *man,
+		      struct ttm_buffer_object *bo,
+		      struct ttm_placement *placement,
+		      struct ttm_mem_reg *mem)
+{
+	struct nouveau_mem *node;
+	int ret;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->page_shift = 12;
+
+	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+			     NV_MEM_ACCESS_RW, &node->vma[0]);
+	if (ret) {
+		kfree(node);
+		return ret;
+	}
+
+	mem->mm_node = node;
+	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
+	return 0;
+}
+
+static void
+nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nv04_gart_manager = {
+	nv04_gart_manager_init,
+	nv04_gart_manager_fini,
+	nv04_gart_manager_new,
+	nv04_gart_manager_del,
+	nv04_gart_manager_debug
+};
 
 int
 nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv = filp->private_data;
-	struct drm_nouveau_private *dev_priv =
-		file_priv->minor->dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
 
 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
 		return drm_mmap(filp, vma);
 
-	return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
+	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
 }
 
 static int
@@ -54,12 +286,12 @@ nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
 }
 
 int
-nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
+nouveau_ttm_global_init(struct nouveau_drm *drm)
 {
 	struct drm_global_reference *global_ref;
 	int ret;
 
-	global_ref = &dev_priv->ttm.mem_global_ref;
+	global_ref = &drm->ttm.mem_global_ref;
 	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
 	global_ref->size = sizeof(struct ttm_mem_global);
 	global_ref->init = &nouveau_ttm_mem_global_init;
@@ -68,12 +300,12 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
 	ret = drm_global_item_ref(global_ref);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed setting up TTM memory accounting\n");
-		dev_priv->ttm.mem_global_ref.release = NULL;
+		drm->ttm.mem_global_ref.release = NULL;
 		return ret;
 	}
 
-	dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
-	global_ref = &dev_priv->ttm.bo_global_ref.ref;
+	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
+	global_ref = &drm->ttm.bo_global_ref.ref;
 	global_ref->global_type = DRM_GLOBAL_TTM_BO;
 	global_ref->size = sizeof(struct ttm_bo_global);
 	global_ref->init = &ttm_bo_global_init;
@@ -82,8 +314,8 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
 	ret = drm_global_item_ref(global_ref);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed setting up TTM BO subsystem\n");
-		drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
-		dev_priv->ttm.mem_global_ref.release = NULL;
+		drm_global_item_unref(&drm->ttm.mem_global_ref);
+		drm->ttm.mem_global_ref.release = NULL;
 		return ret;
 	}
 
@@ -91,13 +323,101 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
 }
 
 void
-nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
+nouveau_ttm_global_release(struct nouveau_drm *drm)
 {
-	if (dev_priv->ttm.mem_global_ref.release == NULL)
+	if (drm->ttm.mem_global_ref.release == NULL)
 		return;
 
-	drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
-	drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
-	dev_priv->ttm.mem_global_ref.release = NULL;
+	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&drm->ttm.mem_global_ref);
+	drm->ttm.mem_global_ref.release = NULL;
 }
 
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+	u32 bits;
+	int ret;
+
+	bits = nouveau_vmmgr(drm->device)->dma_bits;
+	if ( drm->agp.stat == ENABLED ||
+	    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
+		bits = 32;
+
+	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+	if (ret)
+		return ret;
+
+	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+	if (ret)
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+
+	ret = nouveau_ttm_global_init(drm);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&drm->ttm.bdev,
+				  drm->ttm.bo_global_ref.ref.object,
+				  &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+				  bits <= 32 ? true : false);
+	if (ret) {
+		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
+		return ret;
+	}
+
+	/* VRAM init */
+	drm->gem.vram_available  = nouveau_fb(drm->device)->ram.size;
+	drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
+
+	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
+			      drm->gem.vram_available >> PAGE_SHIFT);
+	if (ret) {
+		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
+		return ret;
+	}
+
+	drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+				     pci_resource_len(dev->pdev, 1),
+				     DRM_MTRR_WC);
+
+	/* GART init */
+	if (drm->agp.stat != ENABLED) {
+		drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
+		if (drm->gem.gart_available > 512 * 1024 * 1024)
+			drm->gem.gart_available = 512 * 1024 * 1024;
+	} else {
+		drm->gem.gart_available = drm->agp.size;
+	}
+
+	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
+			      drm->gem.gart_available >> PAGE_SHIFT);
+	if (ret) {
+		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
+		return ret;
+	}
+
+	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
+	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
+	return 0;
+}
+
+void
+nouveau_ttm_fini(struct nouveau_drm *drm)
+{
+	mutex_lock(&drm->dev->struct_mutex);
+	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
+	mutex_unlock(&drm->dev->struct_mutex);
+
+	ttm_bo_device_release(&drm->ttm.bdev);
+
+	nouveau_ttm_global_release(drm);
+
+	if (drm->ttm.mtrr >= 0) {
+		drm_mtrr_del(drm->ttm.mtrr,
+			     pci_resource_start(drm->dev->pdev, 1),
+			     pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
+		drm->ttm.mtrr = -1;
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
new file mode 100644
index 000000000000..25b0de413352
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -0,0 +1,25 @@
+#ifndef __NOUVEAU_TTM_H__
+#define __NOUVEAU_TTM_H__
+
+static inline struct nouveau_drm *
+nouveau_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct nouveau_drm, ttm.bdev);
+}
+
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
+extern const struct ttm_mem_type_manager_func nv04_gart_manager;
+
+struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
+					unsigned long size, u32 page_flags,
+					struct page *dummy_read_page);
+
+int  nouveau_ttm_init(struct nouveau_drm *drm);
+void nouveau_ttm_fini(struct nouveau_drm *drm);
+int  nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+
+int  nouveau_ttm_global_init(struct nouveau_drm *);
+void nouveau_ttm_global_release(struct nouveau_drm *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
deleted file mode 100644
index b97719fbb739..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_util.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2010 Nouveau Project
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NOUVEAU_UTIL_H__
-#define __NOUVEAU_UTIL_H__
-
-struct nouveau_bitfield {
-	u32 mask;
-	const char *name;
-};
-
-struct nouveau_enum {
-	u32 value;
-	const char *name;
-	void *data;
-};
-
-void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
-void nouveau_enum_print(const struct nouveau_enum *, u32 value);
-const struct nouveau_enum *
-nouveau_enum_find(const struct nouveau_enum *, u32 value);
-
-int nouveau_ratelimit(void);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
new file mode 100644
index 000000000000..6f0ac64873df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -0,0 +1,99 @@
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_acpi.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_vga.h"
+
+static unsigned int
+nouveau_vga_set_decode(void *priv, bool state)
+{
+	struct nouveau_device *device = nouveau_dev(priv);
+
+	if (device->chipset >= 0x40)
+		nv_wr32(device, 0x088054, state);
+	else
+		nv_wr32(device, 0x001854, state);
+
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void
+nouveau_switcheroo_set_state(struct pci_dev *pdev,
+			     enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+	if (state == VGA_SWITCHEROO_ON) {
+		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		nouveau_drm_resume(pdev);
+		drm_kms_helper_poll_enable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	} else {
+		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		drm_kms_helper_poll_disable(dev);
+		nouveau_switcheroo_optimus_dsm();
+		nouveau_drm_suspend(pdev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+	}
+}
+
+static void
+nouveau_switcheroo_reprobe(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	nouveau_fbcon_output_poll_changed(dev);
+}
+
+static bool
+nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	bool can_switch;
+
+	spin_lock(&dev->count_lock);
+	can_switch = (dev->open_count == 0);
+	spin_unlock(&dev->count_lock);
+	return can_switch;
+}
+
+static const struct vga_switcheroo_client_ops
+nouveau_switcheroo_ops = {
+	.set_gpu_state = nouveau_switcheroo_set_state,
+	.reprobe = nouveau_switcheroo_reprobe,
+	.can_switch = nouveau_switcheroo_can_switch,
+};
+
+void
+nouveau_vga_init(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
+}
+
+void
+nouveau_vga_fini(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+	vga_switcheroo_unregister_client(dev->pdev);
+	vga_client_register(dev->pdev, NULL, NULL, NULL);
+}
+
+
+void
+nouveau_vga_lastclose(struct drm_device *dev)
+{
+	vga_switcheroo_process_delayed_switch();
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h
new file mode 100644
index 000000000000..ea3ad6974c65
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -0,0 +1,8 @@
+#ifndef __NOUVEAU_VGA_H__
+#define __NOUVEAU_VGA_H__
+
+void nouveau_vga_init(struct nouveau_drm *);
+void nouveau_vga_fini(struct nouveau_drm *);
+void nouveau_vga_lastclose(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index fbc3a1efd501..9976414cbe50 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -24,18 +24,21 @@
 
 #include <drm/drmP.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_pm.h"
-#include "nouveau_gpio.h"
 
-static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+
+static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
 static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
 
 int
 nouveau_voltage_gpio_get(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(device);
 	u8 vid = 0;
 	int i;
 
@@ -43,7 +46,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
 		if (!(volt->vid_mask & (1 << i)))
 			continue;
 
-		vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
+		vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
 	}
 
 	return nouveau_volt_lvl_lookup(dev, vid);
@@ -52,8 +55,9 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
 int
 nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(device);
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
 	int vid, i;
 
 	vid = nouveau_volt_vid_lookup(dev, voltage);
@@ -64,7 +68,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
 		if (!(volt->vid_mask & (1 << i)))
 			continue;
 
-		nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
+		gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
 	}
 
 	return 0;
@@ -73,8 +77,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
 int
 nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
 	int i;
 
 	for (i = 0; i < volt->nr_level; i++) {
@@ -88,8 +91,7 @@ nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
 int
 nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
 	int i;
 
 	for (i = 0; i < volt->nr_level; i++) {
@@ -103,10 +105,12 @@ nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
 void
 nouveau_volt_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	struct nouveau_pm *pm = nouveau_pm(dev);
 	struct nouveau_pm_voltage *voltage = &pm->voltage;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nvbios *bios = &drm->vbios;
+	struct dcb_gpio_func func;
 	struct bit_entry P;
 	u8 *volt = NULL, *entry;
 	int i, headerlen, recordlen, entries, vidmask, vidshift;
@@ -121,11 +125,11 @@ nouveau_volt_init(struct drm_device *dev)
 		if (P.version == 2)
 			volt = ROMPTR(dev, P.data[12]);
 		else {
-			NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
+			NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
 		}
 	} else {
 		if (bios->data[bios->offset + 6] < 0x27) {
-			NV_DEBUG(dev, "BMP version too old for voltage\n");
+			NV_DEBUG(drm, "BMP version too old for voltage\n");
 			return;
 		}
 
@@ -133,7 +137,7 @@ nouveau_volt_init(struct drm_device *dev)
 	}
 
 	if (!volt) {
-		NV_DEBUG(dev, "voltage table pointer invalid\n");
+		NV_DEBUG(drm, "voltage table pointer invalid\n");
 		return;
 	}
 
@@ -177,7 +181,7 @@ nouveau_volt_init(struct drm_device *dev)
 		vidshift  = 0;
 		break;
 	default:
-		NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
+		NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
 		return;
 	}
 
@@ -189,12 +193,12 @@ nouveau_volt_init(struct drm_device *dev)
 	i = 0;
 	while (vidmask) {
 		if (i > nr_vidtag) {
-			NV_DEBUG(dev, "vid bit %d unknown\n", i);
+			NV_DEBUG(drm, "vid bit %d unknown\n", i);
 			return;
 		}
 
-		if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
-			NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
+		if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
+			NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
 			return;
 		}
 
@@ -240,8 +244,7 @@ nouveau_volt_init(struct drm_device *dev)
 void
 nouveau_volt_fini(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
 
 	kfree(volt->level);
 }
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 37d5b5bf7587..82a0d9c6cda3 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -26,14 +26,20 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_bo.h"
+#include "nouveau_gem.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_fb.h"
 #include "nouveau_hw.h"
 #include "nvreg.h"
 #include "nouveau_fbcon.h"
+#include "nv04_display.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
 
 static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -49,8 +55,8 @@ crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int in
 static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct drm_device *dev = crtc->dev;
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
 
 	regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
 	if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
@@ -64,8 +70,8 @@ static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
 static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct drm_device *dev = crtc->dev;
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
 
 	nv_crtc->sharpness = level;
 	if (level < 0)	/* blur is in hw range 0x3f -> 0x20 */
@@ -103,14 +109,17 @@ static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
 static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_bios *bios = nouveau_bios(drm->device);
+	struct nouveau_clock *clk = nouveau_clock(drm->device);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nv04_mode_state *state = &dev_priv->mode_reg;
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
 	struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
 	struct nouveau_pll_vals *pv = &regp->pllvals;
-	struct pll_lims pll_lim;
+	struct nvbios_pll pll_lim;
 
-	if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim))
+	if (nvbios_pll_parse(bios, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0,
+			    &pll_lim))
 		return;
 
 	/* NM2 == 0 is used to determine single stage mode on two stage plls */
@@ -126,28 +135,29 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
 	 * has yet been observed in allowing the use a single stage pll on all
 	 * nv43 however.  the behaviour of single stage use is untested on nv40
 	 */
-	if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2))
+	if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
 		memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
 
-	if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv))
+
+	if (!clk->pll_calc(clk, &pll_lim, dot_clock, pv))
 		return;
 
 	state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
 
 	/* The blob uses this always, so let's do the same */
-	if (dev_priv->card_type == NV_40)
+	if (nv_device(drm->device)->card_type == NV_40)
 		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
 	/* again nv40 and some nv43 act more like nv3x as described above */
-	if (dev_priv->chipset < 0x41)
+	if (nv_device(drm->device)->chipset < 0x41)
 		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
 				 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
 	state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
 
 	if (pv->NM2)
-		NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
+		NV_DEBUG(drm, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
 			 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
 	else
-		NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n",
+		NV_DEBUG(drm, "vpll: n %d m %d log2p %d\n",
 			 pv->N1, pv->M1, pv->log2P);
 
 	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
@@ -158,10 +168,11 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	unsigned char seq1 = 0, crtc17 = 0;
 	unsigned char crtc1A;
 
-	NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
+	NV_DEBUG(drm, "Setting dpms mode %d on CRTC %d\n", mode,
 							nv_crtc->index);
 
 	if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
@@ -225,9 +236,8 @@ static void
 nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
 	struct drm_framebuffer *fb = crtc->fb;
 
 	/* Calculate our timings */
@@ -251,8 +261,8 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 
 		if (encoder->crtc == crtc &&
-		    (nv_encoder->dcb->type == OUTPUT_LVDS ||
-		     nv_encoder->dcb->type == OUTPUT_TMDS))
+		    (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
+		     nv_encoder->dcb->type == DCB_OUTPUT_TMDS))
 			fp_output = true;
 	}
 
@@ -264,7 +274,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 		horizEnd = horizTotal - 2;
 		horizBlankEnd = horizTotal + 4;
 #if 0
-		if (dev->overlayAdaptor && dev_priv->card_type >= NV_10)
+		if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10)
 			/* This reportedly works around some video overlay bandwidth problems */
 			horizTotal += 2;
 #endif
@@ -452,10 +462,10 @@ static void
 nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
-	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
 	struct drm_encoder *encoder;
 	bool lvds_output = false, tmds_output = false, tv_output = false,
 		off_chip_digital = false;
@@ -467,11 +477,11 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 		if (encoder->crtc != crtc)
 			continue;
 
-		if (nv_encoder->dcb->type == OUTPUT_LVDS)
+		if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
 			digital = lvds_output = true;
-		if (nv_encoder->dcb->type == OUTPUT_TV)
+		if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
 			tv_output = true;
-		if (nv_encoder->dcb->type == OUTPUT_TMDS)
+		if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
 			digital = tmds_output = true;
 		if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
 			off_chip_digital = true;
@@ -500,7 +510,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 	regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
 			     NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
 			     NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
-	if (dev_priv->chipset >= 0x11)
+	if (nv_device(drm->device)->chipset >= 0x11)
 		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -533,7 +543,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 
 	/* The blob seems to take the current value from crtc 0, add 4 to that
 	 * and reuse the old value for crtc 1 */
-	regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
+	regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = nv04_display(dev)->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
 	if (!nv_crtc->index)
 		regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
 
@@ -541,26 +551,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 	 * 1 << 30 on 0x60.830), for no apparent reason */
 	regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
 
-	if (dev_priv->card_type >= NV_30)
+	if (nv_device(drm->device)->card_type >= NV_30)
 		regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
 
 	regp->crtc_830 = mode->crtc_vdisplay - 3;
 	regp->crtc_834 = mode->crtc_vdisplay - 1;
 
-	if (dev_priv->card_type == NV_40)
+	if (nv_device(drm->device)->card_type == NV_40)
 		/* This is what the blob does */
 		regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
 
-	if (dev_priv->card_type >= NV_30)
+	if (nv_device(drm->device)->card_type >= NV_30)
 		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
 
-	if (dev_priv->card_type >= NV_10)
+	if (nv_device(drm->device)->card_type >= NV_10)
 		regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
 	else
 		regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
 
 	/* Some misc regs */
-	if (dev_priv->card_type == NV_40) {
+	if (nv_device(drm->device)->card_type == NV_40) {
 		regp->CRTC[NV_CIO_CRE_85] = 0xFF;
 		regp->CRTC[NV_CIO_CRE_86] = 0x1;
 	}
@@ -572,7 +582,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 
 	/* Generic PRAMDAC regs */
 
-	if (dev_priv->card_type >= NV_10)
+	if (nv_device(drm->device)->card_type >= NV_10)
 		/* Only bit that bios and blob set. */
 		regp->nv10_cursync = (1 << 25);
 
@@ -581,7 +591,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 				NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
 	if (crtc->fb->depth == 16)
 		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
-	if (dev_priv->chipset >= 0x11)
+	if (nv_device(drm->device)->chipset >= 0x11)
 		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
 
 	regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -611,9 +621,9 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 {
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
+	NV_DEBUG(drm, "CRTC mode on CRTC %d:\n", nv_crtc->index);
 	drm_mode_debug_printmodeline(adjusted_mode);
 
 	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
@@ -621,8 +631,8 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
 	nv_crtc_mode_set_vga(crtc, adjusted_mode);
 	/* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
-	if (dev_priv->card_type == NV_40)
-		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
+	if (nv_device(drm->device)->card_type == NV_40)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
 	nv_crtc_mode_set_regs(crtc, adjusted_mode);
 	nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
 	return 0;
@@ -631,10 +641,10 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 static void nv_crtc_save(struct drm_crtc *crtc)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
-	struct nv04_mode_state *state = &dev_priv->mode_reg;
+	struct drm_device *dev = crtc->dev;
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
 	struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
-	struct nv04_mode_state *saved = &dev_priv->saved_reg;
+	struct nv04_mode_state *saved = &nv04_display(dev)->saved_reg;
 	struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
 
 	if (nv_two_heads(crtc->dev))
@@ -652,14 +662,14 @@ static void nv_crtc_save(struct drm_crtc *crtc)
 static void nv_crtc_restore(struct drm_crtc *crtc)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	struct drm_device *dev = crtc->dev;
 	int head = nv_crtc->index;
-	uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
+	uint8_t saved_cr21 = nv04_display(dev)->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
 
 	if (nv_two_heads(crtc->dev))
 		NVSetOwner(crtc->dev, head);
 
-	nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg);
+	nouveau_hw_load_state(crtc->dev, head, &nv04_display(dev)->saved_reg);
 	nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
 
 	nv_crtc->last_dpms = NV_DPMS_CLEARED;
@@ -668,7 +678,7 @@ static void nv_crtc_restore(struct drm_crtc *crtc)
 static void nv_crtc_prepare(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
 
@@ -682,7 +692,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
 
 	/* Some more preparation. */
 	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
-	if (dev_priv->card_type == NV_40) {
+	if (nv_device(drm->device)->card_type == NV_40) {
 		uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
 		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
 	}
@@ -692,10 +702,9 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
-	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
-	nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg);
+	nouveau_hw_load_state(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
 	nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
 
 #ifdef __BIG_ENDIAN
@@ -715,8 +724,6 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
-	NV_DEBUG_KMS(crtc->dev, "\n");
-
 	if (!nv_crtc)
 		return;
 
@@ -732,18 +739,17 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
 	int i;
 
-	rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC;
+	rgbs = (struct rgb *)nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].DAC;
 	for (i = 0; i < 256; i++) {
 		rgbs[i].r = nv_crtc->lut.r[i] >> 8;
 		rgbs[i].g = nv_crtc->lut.g[i] >> 8;
 		rgbs[i].b = nv_crtc->lut.b[i] >> 8;
 	}
 
-	nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg);
+	nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
 }
 
 static void
@@ -779,18 +785,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
 	struct drm_framebuffer *drm_fb;
 	struct nouveau_framebuffer *fb;
 	int arb_burst, arb_lwm;
 	int ret;
 
-	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
 	/* no fb bound */
 	if (!atomic && !crtc->fb) {
-		NV_DEBUG_KMS(dev, "No FB bound\n");
+		NV_DEBUG(drm, "No FB bound\n");
 		return 0;
 	}
 
@@ -858,7 +864,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
 
-	if (dev_priv->card_type >= NV_20) {
+	if (nv_device(drm->device)->card_type >= NV_20) {
 		regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
 		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
 	}
@@ -878,8 +884,8 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
 			       struct drm_framebuffer *fb,
 			       int x, int y, enum mode_set_atomic state)
 {
-	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	struct drm_device *dev = drm->dev;
 
 	if (state == ENTER_ATOMIC_MODE_SET)
 		nouveau_fbcon_save_disable_accel(dev);
@@ -934,9 +940,9 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
 
 #ifdef __BIG_ENDIAN
 		{
-			struct drm_nouveau_private *dev_priv = dev->dev_private;
+			struct nouveau_drm *drm = nouveau_drm(dev);
 
-			if (dev_priv->chipset == 0x11) {
+			if (nv_device(drm->device)->chipset == 0x11) {
 				pixel = ((pixel & 0x000000ff) << 24) |
 					((pixel & 0x0000ff00) << 8) |
 					((pixel & 0x00ff0000) >> 8) |
@@ -953,8 +959,8 @@ static int
 nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 		     uint32_t buffer_handle, uint32_t width, uint32_t height)
 {
-	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	struct drm_device *dev = drm->dev;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_bo *cursor = NULL;
 	struct drm_gem_object *gem;
@@ -977,7 +983,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	if (ret)
 		goto out;
 
-	if (dev_priv->chipset >= 0x11)
+	if (nv_device(drm->device)->chipset >= 0x11)
 		nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
 	else
 		nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
index 6463870ef19e..fe86f0de348f 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -1,6 +1,7 @@
 #include <drm/drmP.h>
+#include <drm/drm_mode.h>
+#include "nouveau_drm.h"
 #include "nouveau_reg.h"
-#include "nouveau_drv.h"
 #include "nouveau_crtc.h"
 #include "nouveau_hw.h"
 
@@ -37,8 +38,8 @@ static void
 nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
 	struct drm_crtc *crtc = &nv_crtc->base;
 
 	regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
@@ -54,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
-	if (dev_priv->card_type == NV_40)
+	if (nv_device(drm->device)->card_type == NV_40)
 		nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 981e6d4f4c76..347a3bd78d04 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -27,22 +27,25 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
 #include "nouveau_hw.h"
-#include "nouveau_gpio.h"
 #include "nvreg.h"
 
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+
 int nv04_dac_output_offset(struct drm_encoder *encoder)
 {
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	int offset = 0;
 
-	if (dcb->or & (8 | OUTPUT_C))
+	if (dcb->or & (8 | DCB_OUTPUT_C))
 		offset += 0x68;
-	if (dcb->or & (8 | OUTPUT_B))
+	if (dcb->or & (8 | DCB_OUTPUT_B))
 		offset += 0x2000;
 
 	return offset;
@@ -62,6 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
 
 static int sample_load_twice(struct drm_device *dev, bool sense[2])
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
 	int i;
 
 	for (i = 0; i < 2; i++) {
@@ -75,27 +80,30 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
 		 * use a 10ms timeout (guards against crtc being inactive, in
 		 * which case blank state would never change)
 		 */
-		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-				     0x00000001, 0x00000000))
+		if (!nouveau_timer_wait_eq(ptimer, 10000000,
+					   NV_PRMCIO_INP0__COLOR,
+					   0x00000001, 0x00000000))
 			return -EBUSY;
-		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-				     0x00000001, 0x00000001))
+		if (!nouveau_timer_wait_eq(ptimer, 10000000,
+					   NV_PRMCIO_INP0__COLOR,
+					   0x00000001, 0x00000001))
 			return -EBUSY;
-		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-				     0x00000001, 0x00000000))
+		if (!nouveau_timer_wait_eq(ptimer, 10000000,
+					   NV_PRMCIO_INP0__COLOR,
+					   0x00000001, 0x00000000))
 			return -EBUSY;
 
 		udelay(100);
 		/* when level triggers, sense is _LO_ */
-		sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+		sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
 
 		/* take another reading until it agrees with sense_a... */
 		do {
 			udelay(100);
-			sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+			sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
 			if (sense_a != sense_b) {
 				sense_b_prime =
-					nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+					nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
 				if (sense_b == sense_b_prime) {
 					/* ... unless two consecutive subsequent
 					 * samples agree; sense_a is replaced */
@@ -120,6 +128,8 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 						 struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
 	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
@@ -154,11 +164,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 	saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
 
-	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
+	nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
 	for (i = 0; i < 3; i++)
-		saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA);
-	saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK);
-	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0);
+		saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA);
+	saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK);
+	nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
 
 	saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
@@ -171,11 +181,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 	do {
 		bool sense_pair[2];
 
-		nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
-		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
-		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
+		nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
 		/* testing blue won't find monochrome monitors.  I don't care */
-		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue);
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
 
 		i = 0;
 		/* take sample pairs until both samples in the pair agree */
@@ -198,11 +208,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 	} while (++blue < 0x18 && sense);
 
 out:
-	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
+	nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
-	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+	nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
 	for (i = 0; i < 3; i++)
-		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
@@ -210,7 +220,7 @@ out:
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
 
 	if (blue == 0x18) {
-		NV_INFO(dev, "Load detected on head A\n");
+		NV_INFO(drm, "Load detected on head A\n");
 		return connector_status_connected;
 	}
 
@@ -220,43 +230,46 @@ out:
 uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(device);
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
-		saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
+		saved_rtest_ctrl, saved_gpio0 = 0, saved_gpio1 = 0, temp, routput;
 	int head;
 
 #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
-	if (dcb->type == OUTPUT_TV) {
+	if (dcb->type == DCB_OUTPUT_TV) {
 		testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
 
-		if (dev_priv->vbios.tvdactestval)
-			testval = dev_priv->vbios.tvdactestval;
+		if (drm->vbios.tvdactestval)
+			testval = drm->vbios.tvdactestval;
 	} else {
 		testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
 
-		if (dev_priv->vbios.dactestval)
-			testval = dev_priv->vbios.dactestval;
+		if (drm->vbios.dactestval)
+			testval = drm->vbios.dactestval;
 	}
 
 	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
 		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
 
-	saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2);
+	saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2);
 
-	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
+	nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
 	if (regoffset == 0x68) {
-		saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4);
-		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
+		saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4);
+		nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
 	}
 
-	saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
-	saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
-
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+	if (gpio) {
+		saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+		saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
+		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
+	}
 
 	msleep(4);
 
@@ -270,8 +283,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 	/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
 	routput = (saved_routput & 0xfffffece) | head << 8;
 
-	if (dev_priv->card_type >= NV_40) {
-		if (dcb->type == OUTPUT_TV)
+	if (nv_device(drm->device)->card_type >= NV_40) {
+		if (dcb->type == DCB_OUTPUT_TV)
 			routput |= 0x1a << 16;
 		else
 			routput &= ~(0x1a << 16);
@@ -303,11 +316,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
 	if (regoffset == 0x68)
-		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
-	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
+		nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
+	nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
 
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+	if (gpio) {
+		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
+		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
+	}
 
 	return sample;
 }
@@ -315,15 +330,15 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 static enum drm_connector_status
 nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 {
-	struct drm_device *dev = encoder->dev;
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 
 	if (nv04_dac_in_use(encoder))
 		return connector_status_disconnected;
 
 	if (nv17_dac_sample_load(encoder) &
 	    NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
-		NV_INFO(dev, "Load detected on output %c\n",
+		NV_INFO(drm, "Load detected on output %c\n",
 			'@' + ffs(dcb->or));
 		return connector_status_connected;
 	} else {
@@ -357,7 +372,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
 			      struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int head = nouveau_crtc(encoder->crtc)->index;
 
 	if (nv_gf4_disp_arch(dev)) {
@@ -372,7 +387,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
 		/* force any other vga encoders to bind to the other crtc */
 		list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
 			if (rebind == encoder
-			    || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG)
+			    || nouveau_encoder(rebind)->dcb->type != DCB_OUTPUT_ANALOG)
 				continue;
 
 			dac_offset = nv04_dac_output_offset(rebind);
@@ -383,7 +398,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
 	}
 
 	/* This could use refinement for flatpanels, but it should work this way */
-	if (dev_priv->chipset < 0x44)
+	if (nv_device(drm->device)->chipset < 0x44)
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
 	else
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -392,13 +407,13 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
 static void nv04_dac_commit(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
 
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
-	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+	NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
 		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
 		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
@@ -406,11 +421,10 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
 void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 
 	if (nv_gf4_disp_arch(dev)) {
-		uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1];
+		uint32_t *dac_users = &nv04_display(dev)->dac_users[ffs(dcb->or) - 1];
 		int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
 		uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
 
@@ -431,23 +445,23 @@ void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
  * someone else. */
 bool nv04_dac_in_use(struct drm_encoder *encoder)
 {
-	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	struct drm_device *dev = encoder->dev;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 
 	return nv_gf4_disp_arch(encoder->dev) &&
-		(dev_priv->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index));
+		(nv04_display(dev)->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index));
 }
 
 static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
 {
-	struct drm_device *dev = encoder->dev;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 
 	if (nv_encoder->last_dpms == mode)
 		return;
 	nv_encoder->last_dpms = mode;
 
-	NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
+	NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
 		     mode, nv_encoder->dcb->index);
 
 	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
@@ -479,8 +493,6 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 
-	NV_DEBUG_KMS(encoder->dev, "\n");
-
 	drm_encoder_cleanup(encoder);
 	kfree(nv_encoder);
 }
@@ -512,7 +524,7 @@ static const struct drm_encoder_funcs nv04_dac_funcs = {
 };
 
 int
-nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry)
+nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
 {
 	const struct drm_encoder_helper_funcs *helper;
 	struct nouveau_encoder *nv_encoder = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 55ad2dd653fc..da55d7642c8c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -27,7 +27,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
@@ -36,6 +37,8 @@
 
 #include <drm/i2c/sil164.h>
 
+#include <subdev/i2c.h>
+
 #define FP_TG_CONTROL_ON  (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |	\
 			   NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |		\
 			   NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
@@ -49,20 +52,20 @@ static inline bool is_fpc_off(uint32_t fpc)
 			FP_TG_CONTROL_OFF);
 }
 
-int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent)
+int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent)
 {
 	/* special case of nv_read_tmds to find crtc associated with an output.
 	 * this does not give a correct answer for off-chip dvi, but there's no
 	 * use for such an answer anyway
 	 */
-	int ramdac = (dcbent->or & OUTPUT_C) >> 2;
+	int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
 
 	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
 	NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
 	return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
 }
 
-void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
+void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
 			int head, bool dl)
 {
 	/* The BIOS scripts don't do this for us, sadly
@@ -72,13 +75,13 @@ void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
 	 * (for VT restore etc.)
 	 */
 
-	int ramdac = (dcbent->or & OUTPUT_C) >> 2;
+	int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
 	uint8_t tmds04 = 0x80;
 
 	if (head != ramdac)
 		tmds04 = 0x88;
 
-	if (dcbent->type == OUTPUT_LVDS)
+	if (dcbent->type == DCB_OUTPUT_LVDS)
 		tmds04 |= 0x01;
 
 	nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
@@ -89,8 +92,7 @@ void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
 
 void nv04_dfp_disable(struct drm_device *dev, int head)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+	struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
 
 	if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
 	    FP_TG_CONTROL_ON) {
@@ -111,14 +113,13 @@ void nv04_dfp_disable(struct drm_device *dev, int head)
 void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	struct nouveau_crtc *nv_crtc;
 	uint32_t *fpc;
 
 	if (mode == DRM_MODE_DPMS_ON) {
 		nv_crtc = nouveau_crtc(encoder->crtc);
-		fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+		fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
 
 		if (is_fpc_off(*fpc)) {
 			/* using saved value is ok, as (is_digital && dpms_on &&
@@ -133,7 +134,7 @@ void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
 	} else {
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			nv_crtc = nouveau_crtc(crtc);
-			fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+			fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
 
 			nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
 			if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
@@ -151,10 +152,10 @@ void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
 static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	struct drm_encoder *slave;
 
-	if (dcb->type != OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP)
+	if (dcb->type != DCB_OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP)
 		return NULL;
 
 	/* Some BIOSes (e.g. the one in a Quadro FX1000) report several
@@ -168,9 +169,9 @@ static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
 	 * let's do the same.
 	 */
 	list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
-		struct dcb_entry *slave_dcb = nouveau_encoder(slave)->dcb;
+		struct dcb_output *slave_dcb = nouveau_encoder(slave)->dcb;
 
-		if (slave_dcb->type == OUTPUT_TMDS && get_slave_funcs(slave) &&
+		if (slave_dcb->type == DCB_OUTPUT_TMDS && get_slave_funcs(slave) &&
 		    slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
 			return slave;
 	}
@@ -202,9 +203,8 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
 static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
 				     struct nouveau_encoder *nv_encoder, int head)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_mode_state *state = &dev_priv->mode_reg;
-	uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
+	uint32_t bits1618 = nv_encoder->dcb->or & DCB_OUTPUT_A ? 0x10000 : 0x40000;
 
 	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
 		return;
@@ -233,8 +233,8 @@ static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
 	 * 	and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
 	 * 	entry has the necessary info)
 	 */
-	if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) {
-		int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1;
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS && nv04_display(dev)->saved_reg.sel_clk & 0xf0) {
+		int shift = (nv04_display(dev)->saved_reg.sel_clk & 0x50) ? 0 : 1;
 
 		state->sel_clk &= ~0xf0;
 		state->sel_clk |= (head ? 0x40 : 0x10) << shift;
@@ -246,9 +246,8 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int head = nouveau_crtc(encoder->crtc)->index;
-	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+	struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
 	uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
 	uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
 
@@ -263,7 +262,7 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
 			*cr_lcd |= head ? 0x0 : 0x8;
 		else {
 			*cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
-			if (nv_encoder->dcb->type == OUTPUT_LVDS)
+			if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
 				*cr_lcd |= 0x30;
 			if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
 				/* avoid being connected to both crtcs */
@@ -282,17 +281,18 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 			      struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
-	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
 	struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_display_mode *output_mode = &nv_encoder->mode;
 	struct drm_connector *connector = &nv_connector->base;
 	uint32_t mode_ratio, panel_ratio;
 
-	NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
+	NV_DEBUG(drm, "Output mode on CRTC %d:\n", nv_crtc->index);
 	drm_mode_debug_printmodeline(output_mode);
 
 	/* Initialize the FP registers in this CRTC. */
@@ -300,10 +300,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 	regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
 	if (!nv_gf4_disp_arch(dev) ||
 	    (output_mode->hsync_start - output_mode->hdisplay) >=
-					dev_priv->vbios.digital_min_front_porch)
+					drm->vbios.digital_min_front_porch)
 		regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
 	else
-		regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1;
+		regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - drm->vbios.digital_min_front_porch - 1;
 	regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
 	regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
 	regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
@@ -335,12 +335,12 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
 	else /* gpu needs to scale */
 		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
-	if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
+	if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
 		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
 	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
 	    output_mode->clock > 165000)
 		regp->fp_control |= (2 << 24);
-	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
 		bool duallink = false, dummy;
 		if (nv_connector->edid &&
 		    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
@@ -416,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 	if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
 	    (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
 	     encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
-		if (dev_priv->chipset == 0x11)
+		if (nv_device(drm->device)->chipset == 0x11)
 			regp->dither = savep->dither | 0x00010000;
 		else {
 			int i;
@@ -427,7 +427,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 			}
 		}
 	} else {
-		if (dev_priv->chipset != 0x11) {
+		if (nv_device(drm->device)->chipset != 0x11) {
 			/* reset them */
 			int i;
 			for (i = 0; i < 3; i++) {
@@ -444,26 +444,26 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 static void nv04_dfp_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct dcb_entry *dcbe = nv_encoder->dcb;
+	struct dcb_output *dcbe = nv_encoder->dcb;
 	int head = nouveau_crtc(encoder->crtc)->index;
 	struct drm_encoder *slave_encoder;
 
-	if (dcbe->type == OUTPUT_TMDS)
+	if (dcbe->type == DCB_OUTPUT_TMDS)
 		run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
-	else if (dcbe->type == OUTPUT_LVDS)
+	else if (dcbe->type == DCB_OUTPUT_LVDS)
 		call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
 
 	/* update fp_control state for any changes made by scripts,
 	 * so correct value is written at DPMS on */
-	dev_priv->mode_reg.crtc_reg[head].fp_control =
+	nv04_display(dev)->mode_reg.crtc_reg[head].fp_control =
 		NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
 
 	/* This could use refinement for flatpanels, but it should work this way */
-	if (dev_priv->chipset < 0x44)
+	if (nv_device(drm->device)->chipset < 0x44)
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
 	else
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -476,7 +476,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
 
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
-	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+	NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
 		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
 		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
@@ -485,6 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 {
 #ifdef __powerpc__
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
@@ -492,11 +493,11 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 	if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
 	    dev->pci_device == 0x0329) {
 		if (mode == DRM_MODE_DPMS_ON) {
-			nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
-			nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
+			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
 		} else {
-			nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
-			nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
+			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
 		}
 	}
 #endif
@@ -511,7 +512,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
 
@@ -519,7 +520,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
 		return;
 	nv_encoder->last_dpms = mode;
 
-	NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
+	NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
 		     mode, nv_encoder->dcb->index);
 
 	if (was_powersaving && is_powersaving_dpms(mode))
@@ -549,22 +550,22 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
 	if (mode == DRM_MODE_DPMS_ON)
 		nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
 	else {
-		dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
-		dev_priv->mode_reg.sel_clk &= ~0xf0;
+		nv04_display(dev)->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+		nv04_display(dev)->mode_reg.sel_clk &= ~0xf0;
 	}
-	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
 }
 
 static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
 {
-	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 
 	if (nv_encoder->last_dpms == mode)
 		return;
 	nv_encoder->last_dpms = mode;
 
-	NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
+	NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
 		     mode, nv_encoder->dcb->index);
 
 	nv04_dfp_update_backlight(encoder, mode);
@@ -585,10 +586,9 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int head = nv_encoder->restore.head;
 
-	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
 		struct nouveau_connector *connector =
 			nouveau_encoder_connector_get(nv_encoder);
 
@@ -597,9 +597,9 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
 					 LVDS_PANEL_ON,
 					 connector->native_mode->clock);
 
-	} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
+	} else if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
 		int clock = nouveau_hw_pllvals_to_clk
-					(&dev_priv->saved_reg.crtc_reg[head].pllvals);
+					(&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals);
 
 		run_tmds_table(dev, nv_encoder->dcb, head, clock);
 	}
@@ -611,8 +611,6 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 
-	NV_DEBUG_KMS(encoder->dev, "\n");
-
 	if (get_slave_funcs(encoder))
 		get_slave_funcs(encoder)->destroy(encoder);
 
@@ -623,8 +621,10 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
 static void nv04_tmds_slave_init(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
-	struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, 2);
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *port = i2c->find(i2c, 2);
 	struct i2c_board_info info[] = {
 		{
 			.type = "sil164",
@@ -637,16 +637,16 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
 	};
 	int type;
 
-	if (!nv_gf4_disp_arch(dev) || !i2c ||
+	if (!nv_gf4_disp_arch(dev) || !port ||
 	    get_tmds_slave(encoder))
 		return;
 
-	type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2);
+	type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL);
 	if (type < 0)
 		return;
 
 	drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-			     &i2c->adapter, &info[type]);
+			     &port->adapter, &info[type]);
 }
 
 static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
@@ -676,7 +676,7 @@ static const struct drm_encoder_funcs nv04_dfp_funcs = {
 };
 
 int
-nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
+nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
 {
 	const struct drm_encoder_helper_funcs *helper;
 	struct nouveau_encoder *nv_encoder = NULL;
@@ -684,11 +684,11 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
 	int type;
 
 	switch (entry->type) {
-	case OUTPUT_TMDS:
+	case DCB_OUTPUT_TMDS:
 		type = DRM_MODE_ENCODER_TMDS;
 		helper = &nv04_tmds_helper_funcs;
 		break;
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		type = DRM_MODE_ENCODER_LVDS;
 		helper = &nv04_lvds_helper_funcs;
 		break;
@@ -711,7 +711,7 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
 	encoder->possible_crtcs = entry->heads;
 	encoder->possible_clones = 0;
 
-	if (entry->type == OUTPUT_TMDS &&
+	if (entry->type == DCB_OUTPUT_TMDS &&
 	    entry->location != DCB_LOC_ON_CHIP)
 		nv04_tmds_slave_init(encoder);
 
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index ea1e47a34ddf..846050f04c23 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -25,78 +25,15 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_fb.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_hw.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 
-static void nv04_vblank_crtc0_isr(struct drm_device *);
-static void nv04_vblank_crtc1_isr(struct drm_device *);
-
-static void
-nv04_display_store_initial_head_owner(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->chipset != 0x11) {
-		dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
-		return;
-	}
-
-	/* reading CR44 is broken on nv11, so we attempt to infer it */
-	if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28))	/* heads tied, restore both */
-		dev_priv->crtc_owner = 0x4;
-	else {
-		uint8_t slaved_on_A, slaved_on_B;
-		bool tvA = false;
-		bool tvB = false;
-
-		slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
-									0x80;
-		if (slaved_on_B)
-			tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
-					MASK(NV_CIO_CRE_LCD_LCD_SELECT));
-
-		slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
-									0x80;
-		if (slaved_on_A)
-			tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
-					MASK(NV_CIO_CRE_LCD_LCD_SELECT));
-
-		if (slaved_on_A && !tvA)
-			dev_priv->crtc_owner = 0x0;
-		else if (slaved_on_B && !tvB)
-			dev_priv->crtc_owner = 0x3;
-		else if (slaved_on_A)
-			dev_priv->crtc_owner = 0x0;
-		else if (slaved_on_B)
-			dev_priv->crtc_owner = 0x3;
-		else
-			dev_priv->crtc_owner = 0x0;
-	}
-}
-
 int
 nv04_display_early_init(struct drm_device *dev)
 {
-	/* Make the I2C buses accessible. */
-	if (!nv_gf4_disp_arch(dev)) {
-		uint32_t pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
-
-		if (!(pmc_enable & 1))
-			nv_wr32(dev, NV03_PMC_ENABLE, pmc_enable | 1);
-	}
-
-	/* Unlock the VGA CRTCs. */
-	NVLockVgaCrtcs(dev, false);
-
-	/* Make sure the CRTCs aren't in slaved mode. */
-	if (nv_two_heads(dev)) {
-		nv04_display_store_initial_head_owner(dev);
-		NVSetOwner(dev, 0);
-	}
-
 	/* ensure vblank interrupts are off, they can't be enabled until
 	 * drm_vblank has been initialised
 	 */
@@ -110,25 +47,29 @@ nv04_display_early_init(struct drm_device *dev)
 void
 nv04_display_late_takedown(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (nv_two_heads(dev))
-		NVSetOwner(dev, dev_priv->crtc_owner);
-
-	NVLockVgaCrtcs(dev, true);
 }
 
 int
 nv04_display_create(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
 	struct drm_connector *connector, *ct;
 	struct drm_encoder *encoder;
 	struct drm_crtc *crtc;
+	struct nv04_display *disp;
 	int i, ret;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
+
+	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+	if (!disp)
+		return -ENOMEM;
+
+	nouveau_display(dev)->priv = disp;
+	nouveau_display(dev)->dtor = nv04_display_destroy;
+	nouveau_display(dev)->init = nv04_display_init;
+	nouveau_display(dev)->fini = nv04_display_fini;
 
 	nouveau_hw_save_vga_fonts(dev, 1);
 
@@ -137,28 +78,28 @@ nv04_display_create(struct drm_device *dev)
 		nv04_crtc_create(dev, 1);
 
 	for (i = 0; i < dcb->entries; i++) {
-		struct dcb_entry *dcbent = &dcb->entry[i];
+		struct dcb_output *dcbent = &dcb->entry[i];
 
 		connector = nouveau_connector_create(dev, dcbent->connector);
 		if (IS_ERR(connector))
 			continue;
 
 		switch (dcbent->type) {
-		case OUTPUT_ANALOG:
+		case DCB_OUTPUT_ANALOG:
 			ret = nv04_dac_create(connector, dcbent);
 			break;
-		case OUTPUT_LVDS:
-		case OUTPUT_TMDS:
+		case DCB_OUTPUT_LVDS:
+		case DCB_OUTPUT_TMDS:
 			ret = nv04_dfp_create(connector, dcbent);
 			break;
-		case OUTPUT_TV:
+		case DCB_OUTPUT_TV:
 			if (dcbent->location == DCB_LOC_ON_CHIP)
 				ret = nv17_tv_create(connector, dcbent);
 			else
 				ret = nv04_tv_create(connector, dcbent);
 			break;
 		default:
-			NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
+			NV_WARN(drm, "DCB type %d not known\n", dcbent->type);
 			continue;
 		}
 
@@ -169,7 +110,7 @@ nv04_display_create(struct drm_device *dev)
 	list_for_each_entry_safe(connector, ct,
 				 &dev->mode_config.connector_list, head) {
 		if (!connector->encoder_ids[0]) {
-			NV_WARN(dev, "%s has no encoders, removing\n",
+			NV_WARN(drm, "%s has no encoders, removing\n",
 				drm_get_connector_name(connector));
 			connector->funcs->destroy(connector);
 		}
@@ -185,21 +126,18 @@ nv04_display_create(struct drm_device *dev)
 		func->save(encoder);
 	}
 
-	nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
-	nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
 	return 0;
 }
 
 void
 nv04_display_destroy(struct drm_device *dev)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv04_display *disp = nv04_display(dev);
 	struct drm_encoder *encoder;
 	struct drm_crtc *crtc;
 
-	NV_DEBUG_KMS(dev, "\n");
-
-	nouveau_irq_unregister(dev, 24);
-	nouveau_irq_unregister(dev, 25);
+	NV_DEBUG(drm, "\n");
 
 	/* Turn every CRTC off. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -221,6 +159,9 @@ nv04_display_destroy(struct drm_device *dev)
 		crtc->funcs->restore(crtc);
 
 	nouveau_hw_save_vga_fonts(dev, 0);
+
+	nouveau_display(dev)->priv = NULL;
+	kfree(disp);
 }
 
 int
@@ -257,17 +198,3 @@ nv04_display_fini(struct drm_device *dev)
 	if (nv_two_heads(dev))
 		NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
 }
-
-static void
-nv04_vblank_crtc0_isr(struct drm_device *dev)
-{
-	nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
-	drm_handle_vblank(dev, 0);
-}
-
-static void
-nv04_vblank_crtc1_isr(struct drm_device *dev)
-{
-	nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
-	drm_handle_vblank(dev, 1);
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/nv04_display.h
new file mode 100644
index 000000000000..45322802e37d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.h
@@ -0,0 +1,184 @@
+#ifndef __NV04_DISPLAY_H__
+#define __NV04_DISPLAY_H__
+
+#include <subdev/bios/pll.h>
+
+#include "nouveau_display.h"
+
+enum nv04_fp_display_regs {
+	FP_DISPLAY_END,
+	FP_TOTAL,
+	FP_CRTC,
+	FP_SYNC_START,
+	FP_SYNC_END,
+	FP_VALID_START,
+	FP_VALID_END
+};
+
+struct nv04_crtc_reg {
+	unsigned char MiscOutReg;
+	uint8_t CRTC[0xa0];
+	uint8_t CR58[0x10];
+	uint8_t Sequencer[5];
+	uint8_t Graphics[9];
+	uint8_t Attribute[21];
+	unsigned char DAC[768];
+
+	/* PCRTC regs */
+	uint32_t fb_start;
+	uint32_t crtc_cfg;
+	uint32_t cursor_cfg;
+	uint32_t gpio_ext;
+	uint32_t crtc_830;
+	uint32_t crtc_834;
+	uint32_t crtc_850;
+	uint32_t crtc_eng_ctrl;
+
+	/* PRAMDAC regs */
+	uint32_t nv10_cursync;
+	struct nouveau_pll_vals pllvals;
+	uint32_t ramdac_gen_ctrl;
+	uint32_t ramdac_630;
+	uint32_t ramdac_634;
+	uint32_t tv_setup;
+	uint32_t tv_vtotal;
+	uint32_t tv_vskew;
+	uint32_t tv_vsync_delay;
+	uint32_t tv_htotal;
+	uint32_t tv_hskew;
+	uint32_t tv_hsync_delay;
+	uint32_t tv_hsync_delay2;
+	uint32_t fp_horiz_regs[7];
+	uint32_t fp_vert_regs[7];
+	uint32_t dither;
+	uint32_t fp_control;
+	uint32_t dither_regs[6];
+	uint32_t fp_debug_0;
+	uint32_t fp_debug_1;
+	uint32_t fp_debug_2;
+	uint32_t fp_margin_color;
+	uint32_t ramdac_8c0;
+	uint32_t ramdac_a20;
+	uint32_t ramdac_a24;
+	uint32_t ramdac_a34;
+	uint32_t ctv_regs[38];
+};
+
+struct nv04_output_reg {
+	uint32_t output;
+	int head;
+};
+
+struct nv04_mode_state {
+	struct nv04_crtc_reg crtc_reg[2];
+	uint32_t pllsel;
+	uint32_t sel_clk;
+};
+
+struct nv04_display {
+	struct nv04_mode_state mode_reg;
+	struct nv04_mode_state saved_reg;
+	uint32_t saved_vga_font[4][16384];
+	uint32_t dac_users[4];
+};
+
+static inline struct nv04_display *
+nv04_display(struct drm_device *dev)
+{
+	return nouveau_display(dev)->priv;
+}
+
+/* nv04_display.c */
+int nv04_display_early_init(struct drm_device *);
+void nv04_display_late_takedown(struct drm_device *);
+int nv04_display_create(struct drm_device *);
+void nv04_display_destroy(struct drm_device *);
+int nv04_display_init(struct drm_device *);
+void nv04_display_fini(struct drm_device *);
+
+/* nv04_crtc.c */
+int nv04_crtc_create(struct drm_device *, int index);
+
+/* nv04_dac.c */
+int nv04_dac_create(struct drm_connector *, struct dcb_output *);
+uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
+int nv04_dac_output_offset(struct drm_encoder *encoder);
+void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
+bool nv04_dac_in_use(struct drm_encoder *encoder);
+
+/* nv04_dfp.c */
+int nv04_dfp_create(struct drm_connector *, struct dcb_output *);
+int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent);
+void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
+			       int head, bool dl);
+void nv04_dfp_disable(struct drm_device *dev, int head);
+void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
+
+/* nv04_tv.c */
+int nv04_tv_identify(struct drm_device *dev, int i2c_index);
+int nv04_tv_create(struct drm_connector *, struct dcb_output *);
+
+/* nv17_tv.c */
+int nv17_tv_create(struct drm_connector *, struct dcb_output *);
+
+static inline bool
+nv_two_heads(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	const int impl = dev->pci_device & 0x0ff0;
+
+	if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
+	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
+		return true;
+
+	return false;
+}
+
+static inline bool
+nv_gf4_disp_arch(struct drm_device *dev)
+{
+	return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+}
+
+static inline bool
+nv_two_reg_pll(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	const int impl = dev->pci_device & 0x0ff0;
+
+	if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
+		return true;
+	return false;
+}
+
+static inline bool
+nv_match_device(struct drm_device *dev, unsigned device,
+		unsigned sub_vendor, unsigned sub_device)
+{
+	return dev->pdev->device == device &&
+		dev->pdev->subsystem_vendor == sub_vendor &&
+		dev->pdev->subsystem_device == sub_device;
+}
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
+static inline void
+nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
+			    struct dcb_output *outp, int crtc)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_init init = {
+		.subdev = nv_subdev(bios),
+		.bios = bios,
+		.offset = table,
+		.outp = outp,
+		.crtc = crtc,
+		.execute = 1,
+	};
+
+	nvbios_exec(&init);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
deleted file mode 100644
index 375f5533c313..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_fb.c
+++ /dev/null
@@ -1,54 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-int
-nv04_fb_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
-
-	if (boot0 & 0x00000100) {
-		dev_priv->vram_size  = ((boot0 >> 12) & 0xf) * 2 + 2;
-		dev_priv->vram_size *= 1024 * 1024;
-	} else {
-		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
-			dev_priv->vram_size = 32 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
-			dev_priv->vram_size = 16 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
-			dev_priv->vram_size = 8 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
-			dev_priv->vram_size = 4 * 1024 * 1024;
-			break;
-		}
-	}
-
-	if ((boot0 & 0x00000038) <= 0x10)
-		dev_priv->vram_type = NV_MEM_TYPE_SGRAM;
-	else
-		dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
-
-	return 0;
-}
-
-int
-nv04_fb_init(struct drm_device *dev)
-{
-	/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
-	 * nvidia reading PFB_CFG_0, then writing back its original value.
-	 * (which was 0x701114 in this case)
-	 */
-
-	nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
-	return 0;
-}
-
-void
-nv04_fb_takedown(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index fc53a3922bce..77dcc9c50777 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,19 +22,18 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include <core/object.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
 
 int
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 4);
@@ -53,9 +52,8 @@ int
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 7);
@@ -81,9 +79,8 @@ int
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	uint32_t fg;
 	uint32_t bg;
 	uint32_t dsize;
@@ -142,9 +139,10 @@ nv04_fbcon_accel_init(struct fb_info *info)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
-	const int sub = NvSubCtxSurf2D;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_object *object;
 	int surface_fmt, pattern_fmt, rect_fmt;
 	int ret;
 
@@ -176,31 +174,35 @@ nv04_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
-				    dev_priv->card_type >= NV_10 ?
-				    0x0062 : 0x0042);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D,
+				 device->card_type >= NV_10 ? 0x0062 : 0x0042,
+				 NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect,
+				 0x0019, NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop,
+				 0x0043, NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt,
+				 0x0044, NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect,
+				 0x004a, NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
-				    dev_priv->chipset >= 0x11 ?
-				    0x009f : 0x005f);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit,
+				 device->chipset >= 0x11 ? 0x009f : 0x005f,
+				 NULL, 0, &object);
 	if (ret)
 		return ret;
 
@@ -209,25 +211,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
 		return 0;
 	}
 
-	BEGIN_NV04(chan, sub, 0x0000, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
 	OUT_RING(chan, NvCtxSurf2D);
-	BEGIN_NV04(chan, sub, 0x0184, 2);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
 	OUT_RING(chan, NvDmaFB);
 	OUT_RING(chan, NvDmaFB);
-	BEGIN_NV04(chan, sub, 0x0300, 4);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
 	OUT_RING(chan, surface_fmt);
 	OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
 	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
 	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
 
-	BEGIN_NV04(chan, sub, 0x0000, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
 	OUT_RING(chan, NvRop);
-	BEGIN_NV04(chan, sub, 0x0300, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
 	OUT_RING(chan, 0x55);
 
-	BEGIN_NV04(chan, sub, 0x0000, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
 	OUT_RING(chan, NvImagePatt);
-	BEGIN_NV04(chan, sub, 0x0300, 8);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
 	OUT_RING(chan, pattern_fmt);
 #ifdef __BIG_ENDIAN
 	OUT_RING(chan, 2);
@@ -241,9 +243,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, ~0);
 	OUT_RING(chan, ~0);
 
-	BEGIN_NV04(chan, sub, 0x0000, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
 	OUT_RING(chan, NvClipRect);
-	BEGIN_NV04(chan, sub, 0x0300, 2);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index aa6859270662..a220b94ba9f2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,15 +22,14 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fence.h"
 
 struct nv04_fence_chan {
 	struct nouveau_fence_chan base;
-	atomic_t sequence;
 };
 
 struct nv04_fence_priv {
@@ -57,84 +56,56 @@ nv04_fence_sync(struct nouveau_fence *fence,
 	return -ENODEV;
 }
 
-int
-nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
-	atomic_set(&fctx->sequence, data);
-	return 0;
-}
-
 static u32
 nv04_fence_read(struct nouveau_channel *chan)
 {
-	struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
-	return atomic_read(&fctx->sequence);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	return atomic_read(&fifo->refcnt);
 }
 
 static void
-nv04_fence_context_del(struct nouveau_channel *chan, int engine)
+nv04_fence_context_del(struct nouveau_channel *chan)
 {
-	struct nv04_fence_chan *fctx = chan->engctx[engine];
+	struct nv04_fence_chan *fctx = chan->fence;
 	nouveau_fence_context_del(&fctx->base);
-	chan->engctx[engine] = NULL;
+	chan->fence = NULL;
 	kfree(fctx);
 }
 
 static int
-nv04_fence_context_new(struct nouveau_channel *chan, int engine)
+nv04_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (fctx) {
 		nouveau_fence_context_new(&fctx->base);
-		atomic_set(&fctx->sequence, 0);
-		chan->engctx[engine] = fctx;
+		chan->fence = fctx;
 		return 0;
 	}
 	return -ENOMEM;
 }
 
-static int
-nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static int
-nv04_fence_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
 static void
-nv04_fence_destroy(struct drm_device *dev, int engine)
+nv04_fence_destroy(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_fence_priv *priv = nv_engine(dev, engine);
-
-	dev_priv->eng[engine] = NULL;
+	struct nv04_fence_priv *priv = drm->fence;
+	drm->fence = NULL;
 	kfree(priv);
 }
 
 int
-nv04_fence_create(struct drm_device *dev)
+nv04_fence_create(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv04_fence_priv *priv;
-	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	priv->base.engine.destroy = nv04_fence_destroy;
-	priv->base.engine.init = nv04_fence_init;
-	priv->base.engine.fini = nv04_fence_fini;
-	priv->base.engine.context_new = nv04_fence_context_new;
-	priv->base.engine.context_del = nv04_fence_context_del;
+	priv->base.dtor = nv04_fence_destroy;
+	priv->base.context_new = nv04_fence_context_new;
+	priv->base.context_del = nv04_fence_context_del;
 	priv->base.emit = nv04_fence_emit;
 	priv->base.sync = nv04_fence_sync;
 	priv->base.read = nv04_fence_read;
-	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
-	return ret;
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
deleted file mode 100644
index 65f966deeee6..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ /dev/null
@@ -1,505 +0,0 @@
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
-
-static struct ramfc_desc {
-	unsigned bits:6;
-	unsigned ctxs:5;
-	unsigned ctxp:8;
-	unsigned regs:5;
-	unsigned regp;
-} nv04_ramfc[] = {
-	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
-	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
-	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
-	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
-	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
-	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
-	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
-	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
-	{}
-};
-
-struct nv04_fifo_priv {
-	struct nouveau_fifo_priv base;
-	struct ramfc_desc *ramfc_desc;
-};
-
-struct nv04_fifo_chan {
-	struct nouveau_fifo_chan base;
-	struct nouveau_gpuobj *ramfc;
-};
-
-bool
-nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
-{
-	int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
-
-	if (!enable) {
-		/* In some cases the PFIFO puller may be left in an
-		 * inconsistent state if you try to stop it when it's
-		 * busy translating handles. Sometimes you get a
-		 * PFIFO_CACHE_ERROR, sometimes it just fails silently
-		 * sending incorrect instance offsets to PGRAPH after
-		 * it's started up again. To avoid the latter we
-		 * invalidate the most recently calculated instance.
-		 */
-		if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
-				  NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
-			NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
-
-		if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
-				 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-				     NV_PFIFO_INTR_CACHE_ERROR);
-
-		nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-	}
-
-	return pull & 1;
-}
-
-static int
-nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_fifo_priv *priv = nv_engine(dev, engine);
-	struct nv04_fifo_chan *fctx;
-	unsigned long flags;
-	int ret;
-
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-
-	/* map channel control registers */
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			     NV03_USER(chan->id), PAGE_SIZE);
-	if (!chan->user) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	/* initialise default fifo context */
-	ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
-				      chan->id * 32, ~0, 32,
-				      NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
-	if (ret)
-		goto error;
-
-	nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
-	nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
-	nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
-	nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
-	nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-				   NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-				   NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-				   NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nv_wo32(fctx->ramfc, 0x14, 0x00000000);
-	nv_wo32(fctx->ramfc, 0x18, 0x00000000);
-	nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
-
-	/* enable dma mode on the channel */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
-	if (ret)
-		priv->base.base.context_del(chan, engine);
-	return ret;
-}
-
-void
-nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
-	struct nv04_fifo_chan *fctx = chan->engctx[engine];
-	struct ramfc_desc *c = priv->ramfc_desc;
-	unsigned long flags;
-	int chid;
-
-	/* prevent fifo context switches */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
-	/* if this channel is active, replace it with a null context */
-	chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
-	if (chid == chan->id) {
-		nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
-		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
-		nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
-
-		do {
-			u32 mask = ((1ULL << c->bits) - 1) << c->regs;
-			nv_mask(dev, c->regp, mask, 0x00000000);
-		} while ((++c)->bits);
-
-		nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-		nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
-		nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
-		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-	}
-
-	/* restore normal operation, after disabling dma mode */
-	nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* clean up */
-	nouveau_gpuobj_ref(NULL, &fctx->ramfc);
-	nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
-	if (chan->user) {
-		iounmap(chan->user);
-		chan->user = NULL;
-	}
-}
-
-int
-nv04_fifo_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_fifo_priv *priv = nv_engine(dev, engine);
-	int i;
-
-	nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
-	nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-
-	nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
-	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
-	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((dev_priv->ramht->bits - 9) << 16) |
-				       (dev_priv->ramht->gpuobj->pinst >> 8));
-	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
-	nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
-
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
-
-	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-
-	for (i = 0; i < priv->base.channels; i++) {
-		if (dev_priv->channels.ptr[i])
-			nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
-	}
-
-	return 0;
-}
-
-int
-nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_fifo_priv *priv = nv_engine(dev, engine);
-	struct nouveau_channel *chan;
-	int chid;
-
-	/* prevent context switches and halt fifo operation */
-	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
-
-	/* store current fifo context in ramfc */
-	chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
-	chan = dev_priv->channels.ptr[chid];
-	if (suspend && chid != priv->base.channels && chan) {
-		struct nv04_fifo_chan *fctx = chan->engctx[engine];
-		struct nouveau_gpuobj *ctx = fctx->ramfc;
-		struct ramfc_desc *c = priv->ramfc_desc;
-		do {
-			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
-			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
-			u32 rv = (nv_rd32(dev, c->regp) &  rm) >> c->regs;
-			u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
-			nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
-		} while ((++c)->bits);
-	}
-
-	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
-	return 0;
-}
-
-static bool
-nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
-	struct nouveau_gpuobj *obj;
-	unsigned long flags;
-	const int subc = (addr >> 13) & 0x7;
-	const int mthd = addr & 0x1ffc;
-	bool handled = false;
-	u32 engine;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (likely(chid >= 0 && chid < pfifo->channels))
-		chan = dev_priv->channels.ptr[chid];
-	if (unlikely(!chan))
-		goto out;
-
-	switch (mthd) {
-	case 0x0000: /* bind object to subchannel */
-		obj = nouveau_ramht_find(chan, data);
-		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
-			break;
-
-		engine = 0x0000000f << (subc * 4);
-
-		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
-		handled = true;
-		break;
-	default:
-		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
-		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
-			break;
-
-		if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
-					      mthd, data))
-			handled = true;
-		break;
-	}
-
-out:
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return handled;
-}
-
-static const char *nv_dma_state_err(u32 state)
-{
-	static const char * const desc[] = {
-		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
-		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
-	};
-	return desc[(state >> 29) & 0x7];
-}
-
-void
-nv04_fifo_isr(struct drm_device *dev)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t status, reassign;
-	int cnt = 0;
-
-	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
-	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
-		uint32_t chid, get;
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
-		chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
-		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
-		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
-			uint32_t mthd, data;
-			int ptr;
-
-			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
-			 * wrapping on my G80 chips, but CACHE1 isn't big
-			 * enough for this much data.. Tests show that it
-			 * wraps around to the start at GET=0x800.. No clue
-			 * as to why..
-			 */
-			ptr = (get & 0x7ff) >> 2;
-
-			if (dev_priv->card_type < NV_40) {
-				mthd = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_DATA(ptr));
-			} else {
-				mthd = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_DATA(ptr));
-			}
-
-			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
-				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
-					     "Mthd 0x%04x Data 0x%08x\n",
-					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
-					data);
-			}
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-						NV_PFIFO_INTR_CACHE_ERROR);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
-				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
-			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
-		}
-
-		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-			u32 dma_get = nv_rd32(dev, 0x003244);
-			u32 dma_put = nv_rd32(dev, 0x003240);
-			u32 push = nv_rd32(dev, 0x003220);
-			u32 state = nv_rd32(dev, 0x003228);
-
-			if (dev_priv->card_type == NV_50) {
-				u32 ho_get = nv_rd32(dev, 0x003328);
-				u32 ho_put = nv_rd32(dev, 0x003320);
-				u32 ib_get = nv_rd32(dev, 0x003334);
-				u32 ib_put = nv_rd32(dev, 0x003330);
-
-				if (nouveau_ratelimit())
-					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
-					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
-					     "State 0x%08x (err: %s) Push 0x%08x\n",
-						chid, ho_get, dma_get, ho_put,
-						dma_put, ib_get, ib_put, state,
-						nv_dma_state_err(state),
-						push);
-
-				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-				nv_wr32(dev, 0x003364, 0x00000000);
-				if (dma_get != dma_put || ho_get != ho_put) {
-					nv_wr32(dev, 0x003244, dma_put);
-					nv_wr32(dev, 0x003328, ho_put);
-				} else
-				if (ib_get != ib_put) {
-					nv_wr32(dev, 0x003334, ib_put);
-				}
-			} else {
-				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
-					     "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
-					chid, dma_get, dma_put, state,
-					nv_dma_state_err(state), push);
-
-				if (dma_get != dma_put)
-					nv_wr32(dev, 0x003244, dma_put);
-			}
-
-			nv_wr32(dev, 0x003228, 0x00000000);
-			nv_wr32(dev, 0x003220, 0x00000001);
-			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
-			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
-		}
-
-		if (status & NV_PFIFO_INTR_SEMAPHORE) {
-			uint32_t sem;
-
-			status &= ~NV_PFIFO_INTR_SEMAPHORE;
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-				NV_PFIFO_INTR_SEMAPHORE);
-
-			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
-			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-		}
-
-		if (dev_priv->card_type == NV_50) {
-			if (status & 0x00000010) {
-				nv50_fb_vm_trap(dev, nouveau_ratelimit());
-				status &= ~0x00000010;
-				nv_wr32(dev, 0x002100, 0x00000010);
-			}
-		}
-
-		if (status) {
-			if (nouveau_ratelimit())
-				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
-					status, chid);
-			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
-			status = 0;
-		}
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
-	}
-
-	if (status) {
-		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
-		nv_wr32(dev, 0x2140, 0);
-		nv_wr32(dev, 0x140, 0);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-void
-nv04_fifo_destroy(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_fifo_priv *priv = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 8);
-
-	dev_priv->eng[engine] = NULL;
-	kfree(priv);
-}
-
-int
-nv04_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_fifo_priv *priv;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.base.destroy = nv04_fifo_destroy;
-	priv->base.base.init = nv04_fifo_init;
-	priv->base.base.fini = nv04_fifo_fini;
-	priv->base.base.context_new = nv04_fifo_context_new;
-	priv->base.base.context_del = nv04_fifo_context_del;
-	priv->base.channels = 15;
-	priv->ramfc_desc = nv04_ramfc;
-	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
deleted file mode 100644
index 68cce6023461..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ /dev/null
@@ -1,1325 +0,0 @@
-/*
- * Copyright 2007 Stephane Marchesin
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drmP.h>
-#include <drm/nouveau_drm.h>
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-
-struct nv04_graph_engine {
-	struct nouveau_exec_engine base;
-};
-
-static uint32_t nv04_graph_ctx_regs[] = {
-	0x0040053c,
-	0x00400544,
-	0x00400540,
-	0x00400548,
-	NV04_PGRAPH_CTX_SWITCH1,
-	NV04_PGRAPH_CTX_SWITCH2,
-	NV04_PGRAPH_CTX_SWITCH3,
-	NV04_PGRAPH_CTX_SWITCH4,
-	NV04_PGRAPH_CTX_CACHE1,
-	NV04_PGRAPH_CTX_CACHE2,
-	NV04_PGRAPH_CTX_CACHE3,
-	NV04_PGRAPH_CTX_CACHE4,
-	0x00400184,
-	0x004001a4,
-	0x004001c4,
-	0x004001e4,
-	0x00400188,
-	0x004001a8,
-	0x004001c8,
-	0x004001e8,
-	0x0040018c,
-	0x004001ac,
-	0x004001cc,
-	0x004001ec,
-	0x00400190,
-	0x004001b0,
-	0x004001d0,
-	0x004001f0,
-	0x00400194,
-	0x004001b4,
-	0x004001d4,
-	0x004001f4,
-	0x00400198,
-	0x004001b8,
-	0x004001d8,
-	0x004001f8,
-	0x0040019c,
-	0x004001bc,
-	0x004001dc,
-	0x004001fc,
-	0x00400174,
-	NV04_PGRAPH_DMA_START_0,
-	NV04_PGRAPH_DMA_START_1,
-	NV04_PGRAPH_DMA_LENGTH,
-	NV04_PGRAPH_DMA_MISC,
-	NV04_PGRAPH_DMA_PITCH,
-	NV04_PGRAPH_BOFFSET0,
-	NV04_PGRAPH_BBASE0,
-	NV04_PGRAPH_BLIMIT0,
-	NV04_PGRAPH_BOFFSET1,
-	NV04_PGRAPH_BBASE1,
-	NV04_PGRAPH_BLIMIT1,
-	NV04_PGRAPH_BOFFSET2,
-	NV04_PGRAPH_BBASE2,
-	NV04_PGRAPH_BLIMIT2,
-	NV04_PGRAPH_BOFFSET3,
-	NV04_PGRAPH_BBASE3,
-	NV04_PGRAPH_BLIMIT3,
-	NV04_PGRAPH_BOFFSET4,
-	NV04_PGRAPH_BBASE4,
-	NV04_PGRAPH_BLIMIT4,
-	NV04_PGRAPH_BOFFSET5,
-	NV04_PGRAPH_BBASE5,
-	NV04_PGRAPH_BLIMIT5,
-	NV04_PGRAPH_BPITCH0,
-	NV04_PGRAPH_BPITCH1,
-	NV04_PGRAPH_BPITCH2,
-	NV04_PGRAPH_BPITCH3,
-	NV04_PGRAPH_BPITCH4,
-	NV04_PGRAPH_SURFACE,
-	NV04_PGRAPH_STATE,
-	NV04_PGRAPH_BSWIZZLE2,
-	NV04_PGRAPH_BSWIZZLE5,
-	NV04_PGRAPH_BPIXEL,
-	NV04_PGRAPH_NOTIFY,
-	NV04_PGRAPH_PATT_COLOR0,
-	NV04_PGRAPH_PATT_COLOR1,
-	NV04_PGRAPH_PATT_COLORRAM+0x00,
-	NV04_PGRAPH_PATT_COLORRAM+0x04,
-	NV04_PGRAPH_PATT_COLORRAM+0x08,
-	NV04_PGRAPH_PATT_COLORRAM+0x0c,
-	NV04_PGRAPH_PATT_COLORRAM+0x10,
-	NV04_PGRAPH_PATT_COLORRAM+0x14,
-	NV04_PGRAPH_PATT_COLORRAM+0x18,
-	NV04_PGRAPH_PATT_COLORRAM+0x1c,
-	NV04_PGRAPH_PATT_COLORRAM+0x20,
-	NV04_PGRAPH_PATT_COLORRAM+0x24,
-	NV04_PGRAPH_PATT_COLORRAM+0x28,
-	NV04_PGRAPH_PATT_COLORRAM+0x2c,
-	NV04_PGRAPH_PATT_COLORRAM+0x30,
-	NV04_PGRAPH_PATT_COLORRAM+0x34,
-	NV04_PGRAPH_PATT_COLORRAM+0x38,
-	NV04_PGRAPH_PATT_COLORRAM+0x3c,
-	NV04_PGRAPH_PATT_COLORRAM+0x40,
-	NV04_PGRAPH_PATT_COLORRAM+0x44,
-	NV04_PGRAPH_PATT_COLORRAM+0x48,
-	NV04_PGRAPH_PATT_COLORRAM+0x4c,
-	NV04_PGRAPH_PATT_COLORRAM+0x50,
-	NV04_PGRAPH_PATT_COLORRAM+0x54,
-	NV04_PGRAPH_PATT_COLORRAM+0x58,
-	NV04_PGRAPH_PATT_COLORRAM+0x5c,
-	NV04_PGRAPH_PATT_COLORRAM+0x60,
-	NV04_PGRAPH_PATT_COLORRAM+0x64,
-	NV04_PGRAPH_PATT_COLORRAM+0x68,
-	NV04_PGRAPH_PATT_COLORRAM+0x6c,
-	NV04_PGRAPH_PATT_COLORRAM+0x70,
-	NV04_PGRAPH_PATT_COLORRAM+0x74,
-	NV04_PGRAPH_PATT_COLORRAM+0x78,
-	NV04_PGRAPH_PATT_COLORRAM+0x7c,
-	NV04_PGRAPH_PATT_COLORRAM+0x80,
-	NV04_PGRAPH_PATT_COLORRAM+0x84,
-	NV04_PGRAPH_PATT_COLORRAM+0x88,
-	NV04_PGRAPH_PATT_COLORRAM+0x8c,
-	NV04_PGRAPH_PATT_COLORRAM+0x90,
-	NV04_PGRAPH_PATT_COLORRAM+0x94,
-	NV04_PGRAPH_PATT_COLORRAM+0x98,
-	NV04_PGRAPH_PATT_COLORRAM+0x9c,
-	NV04_PGRAPH_PATT_COLORRAM+0xa0,
-	NV04_PGRAPH_PATT_COLORRAM+0xa4,
-	NV04_PGRAPH_PATT_COLORRAM+0xa8,
-	NV04_PGRAPH_PATT_COLORRAM+0xac,
-	NV04_PGRAPH_PATT_COLORRAM+0xb0,
-	NV04_PGRAPH_PATT_COLORRAM+0xb4,
-	NV04_PGRAPH_PATT_COLORRAM+0xb8,
-	NV04_PGRAPH_PATT_COLORRAM+0xbc,
-	NV04_PGRAPH_PATT_COLORRAM+0xc0,
-	NV04_PGRAPH_PATT_COLORRAM+0xc4,
-	NV04_PGRAPH_PATT_COLORRAM+0xc8,
-	NV04_PGRAPH_PATT_COLORRAM+0xcc,
-	NV04_PGRAPH_PATT_COLORRAM+0xd0,
-	NV04_PGRAPH_PATT_COLORRAM+0xd4,
-	NV04_PGRAPH_PATT_COLORRAM+0xd8,
-	NV04_PGRAPH_PATT_COLORRAM+0xdc,
-	NV04_PGRAPH_PATT_COLORRAM+0xe0,
-	NV04_PGRAPH_PATT_COLORRAM+0xe4,
-	NV04_PGRAPH_PATT_COLORRAM+0xe8,
-	NV04_PGRAPH_PATT_COLORRAM+0xec,
-	NV04_PGRAPH_PATT_COLORRAM+0xf0,
-	NV04_PGRAPH_PATT_COLORRAM+0xf4,
-	NV04_PGRAPH_PATT_COLORRAM+0xf8,
-	NV04_PGRAPH_PATT_COLORRAM+0xfc,
-	NV04_PGRAPH_PATTERN,
-	0x0040080c,
-	NV04_PGRAPH_PATTERN_SHAPE,
-	0x00400600,
-	NV04_PGRAPH_ROP3,
-	NV04_PGRAPH_CHROMA,
-	NV04_PGRAPH_BETA_AND,
-	NV04_PGRAPH_BETA_PREMULT,
-	NV04_PGRAPH_CONTROL0,
-	NV04_PGRAPH_CONTROL1,
-	NV04_PGRAPH_CONTROL2,
-	NV04_PGRAPH_BLEND,
-	NV04_PGRAPH_STORED_FMT,
-	NV04_PGRAPH_SOURCE_COLOR,
-	0x00400560,
-	0x00400568,
-	0x00400564,
-	0x0040056c,
-	0x00400400,
-	0x00400480,
-	0x00400404,
-	0x00400484,
-	0x00400408,
-	0x00400488,
-	0x0040040c,
-	0x0040048c,
-	0x00400410,
-	0x00400490,
-	0x00400414,
-	0x00400494,
-	0x00400418,
-	0x00400498,
-	0x0040041c,
-	0x0040049c,
-	0x00400420,
-	0x004004a0,
-	0x00400424,
-	0x004004a4,
-	0x00400428,
-	0x004004a8,
-	0x0040042c,
-	0x004004ac,
-	0x00400430,
-	0x004004b0,
-	0x00400434,
-	0x004004b4,
-	0x00400438,
-	0x004004b8,
-	0x0040043c,
-	0x004004bc,
-	0x00400440,
-	0x004004c0,
-	0x00400444,
-	0x004004c4,
-	0x00400448,
-	0x004004c8,
-	0x0040044c,
-	0x004004cc,
-	0x00400450,
-	0x004004d0,
-	0x00400454,
-	0x004004d4,
-	0x00400458,
-	0x004004d8,
-	0x0040045c,
-	0x004004dc,
-	0x00400460,
-	0x004004e0,
-	0x00400464,
-	0x004004e4,
-	0x00400468,
-	0x004004e8,
-	0x0040046c,
-	0x004004ec,
-	0x00400470,
-	0x004004f0,
-	0x00400474,
-	0x004004f4,
-	0x00400478,
-	0x004004f8,
-	0x0040047c,
-	0x004004fc,
-	0x00400534,
-	0x00400538,
-	0x00400514,
-	0x00400518,
-	0x0040051c,
-	0x00400520,
-	0x00400524,
-	0x00400528,
-	0x0040052c,
-	0x00400530,
-	0x00400d00,
-	0x00400d40,
-	0x00400d80,
-	0x00400d04,
-	0x00400d44,
-	0x00400d84,
-	0x00400d08,
-	0x00400d48,
-	0x00400d88,
-	0x00400d0c,
-	0x00400d4c,
-	0x00400d8c,
-	0x00400d10,
-	0x00400d50,
-	0x00400d90,
-	0x00400d14,
-	0x00400d54,
-	0x00400d94,
-	0x00400d18,
-	0x00400d58,
-	0x00400d98,
-	0x00400d1c,
-	0x00400d5c,
-	0x00400d9c,
-	0x00400d20,
-	0x00400d60,
-	0x00400da0,
-	0x00400d24,
-	0x00400d64,
-	0x00400da4,
-	0x00400d28,
-	0x00400d68,
-	0x00400da8,
-	0x00400d2c,
-	0x00400d6c,
-	0x00400dac,
-	0x00400d30,
-	0x00400d70,
-	0x00400db0,
-	0x00400d34,
-	0x00400d74,
-	0x00400db4,
-	0x00400d38,
-	0x00400d78,
-	0x00400db8,
-	0x00400d3c,
-	0x00400d7c,
-	0x00400dbc,
-	0x00400590,
-	0x00400594,
-	0x00400598,
-	0x0040059c,
-	0x004005a8,
-	0x004005ac,
-	0x004005b0,
-	0x004005b4,
-	0x004005c0,
-	0x004005c4,
-	0x004005c8,
-	0x004005cc,
-	0x004005d0,
-	0x004005d4,
-	0x004005d8,
-	0x004005dc,
-	0x004005e0,
-	NV04_PGRAPH_PASSTHRU_0,
-	NV04_PGRAPH_PASSTHRU_1,
-	NV04_PGRAPH_PASSTHRU_2,
-	NV04_PGRAPH_DVD_COLORFMT,
-	NV04_PGRAPH_SCALED_FORMAT,
-	NV04_PGRAPH_MISC24_0,
-	NV04_PGRAPH_MISC24_1,
-	NV04_PGRAPH_MISC24_2,
-	0x00400500,
-	0x00400504,
-	NV04_PGRAPH_VALID1,
-	NV04_PGRAPH_VALID2,
-	NV04_PGRAPH_DEBUG_3
-};
-
-struct graph_state {
-	uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
-};
-
-static struct nouveau_channel *
-nv04_graph_channel(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chid = 15;
-
-	if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
-		chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
-
-	if (chid > 15)
-		return NULL;
-
-	return dev_priv->channels.ptr[chid];
-}
-
-static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
-		if (nv04_graph_ctx_regs[i] == reg)
-			return &ctx->nv04[i];
-	}
-
-	return NULL;
-}
-
-static int
-nv04_graph_load_context(struct nouveau_channel *chan)
-{
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	uint32_t tmp;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
-		nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
-
-	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
-
-	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
-	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
-
-	tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
-	nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
-
-	return 0;
-}
-
-static int
-nv04_graph_unload_context(struct drm_device *dev)
-{
-	struct nouveau_channel *chan = NULL;
-	struct graph_state *ctx;
-	uint32_t tmp;
-	int i;
-
-	chan = nv04_graph_channel(dev);
-	if (!chan)
-		return 0;
-	ctx = chan->engctx[NVOBJ_ENGINE_GR];
-
-	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
-		ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
-
-	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
-	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= 15 << 24;
-	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
-	return 0;
-}
-
-static int
-nv04_graph_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct graph_state *pgraph_ctx;
-	NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
-
-	pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
-	if (pgraph_ctx == NULL)
-		return -ENOMEM;
-
-	*ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
-
-	chan->engctx[engine] = pgraph_ctx;
-	return 0;
-}
-
-static void
-nv04_graph_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct graph_state *pgraph_ctx = chan->engctx[engine];
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
-	/* Unload the context if it's the currently active one */
-	if (nv04_graph_channel(dev) == chan)
-		nv04_graph_unload_context(dev);
-
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* Free the context resources */
-	kfree(pgraph_ctx);
-	chan->engctx[engine] = NULL;
-}
-
-int
-nv04_graph_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 1;
-	obj->class  = class;
-
-#ifdef __BIG_ENDIAN
-	nv_wo32(obj, 0x00, 0x00080000 | class);
-#else
-	nv_wo32(obj, 0x00, class);
-#endif
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static int
-nv04_graph_init(struct drm_device *dev, int engine)
-{
-	uint32_t tmp;
-
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
-			~NV_PMC_ENABLE_PGRAPH);
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
-			 NV_PMC_ENABLE_PGRAPH);
-
-	/* Enable PGRAPH interrupts */
-	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
-	nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
-	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
-	/*1231C000 blob, 001 haiku*/
-	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
-	/*0x72111100 blob , 01 haiku*/
-	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
-	/*haiku same*/
-
-	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
-	/*haiku and blob 10d4*/
-
-	nv_wr32(dev, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
-	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
-	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= 15 << 24;
-	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
-
-	/* These don't belong here, they're part of a per-channel context */
-	nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
-	nv_wr32(dev, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
-
-	return 0;
-}
-
-static int
-nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
-		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-		return -EBUSY;
-	}
-	nv04_graph_unload_context(dev);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
-	return 0;
-}
-
-/*
- * Software methods, why they are needed, and how they all work:
- *
- * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
- * 2d engine settings are kept inside the grobjs themselves. The grobjs are
- * 3 words long on both. grobj format on NV04 is:
- *
- * word 0:
- *  - bits 0-7: class
- *  - bit 12: color key active
- *  - bit 13: clip rect active
- *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
- *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
- *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
- *            NV03_CONTEXT_SURFACE_DST].
- *  - bits 15-17: 2d operation [aka patch config]
- *  - bit 24: patch valid [enables rendering using this object]
- *  - bit 25: surf3d valid [for tex_tri and multitex_tri only]
- * word 1:
- *  - bits 0-1: mono format
- *  - bits 8-13: color format
- *  - bits 16-31: DMA_NOTIFY instance
- * word 2:
- *  - bits 0-15: DMA_A instance
- *  - bits 16-31: DMA_B instance
- *
- * On NV05 it's:
- *
- * word 0:
- *  - bits 0-7: class
- *  - bit 12: color key active
- *  - bit 13: clip rect active
- *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
- *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
- *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
- *            NV03_CONTEXT_SURFACE_DST].
- *  - bits 15-17: 2d operation [aka patch config]
- *  - bits 20-22: dither mode
- *  - bit 24: patch valid [enables rendering using this object]
- *  - bit 25: surface_dst/surface_color/surf2d/surf3d valid
- *  - bit 26: surface_src/surface_zeta valid
- *  - bit 27: pattern valid
- *  - bit 28: rop valid
- *  - bit 29: beta1 valid
- *  - bit 30: beta4 valid
- * word 1:
- *  - bits 0-1: mono format
- *  - bits 8-13: color format
- *  - bits 16-31: DMA_NOTIFY instance
- * word 2:
- *  - bits 0-15: DMA_A instance
- *  - bits 16-31: DMA_B instance
- *
- * NV05 will set/unset the relevant valid bits when you poke the relevant
- * object-binding methods with object of the proper type, or with the NULL
- * type. It'll only allow rendering using the grobj if all needed objects
- * are bound. The needed set of objects depends on selected operation: for
- * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
- *
- * NV04 doesn't have these methods implemented at all, and doesn't have the
- * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
- * is set. So we have to emulate them in software, internally keeping the
- * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
- * but the last word isn't actually used for anything, we abuse it for this
- * purpose.
- *
- * Actually, NV05 can optionally check bit 24 too, but we disable this since
- * there's no use for it.
- *
- * For unknown reasons, NV04 implements surf3d binding in hardware as an
- * exception. Also for unknown reasons, NV04 doesn't implement the clipping
- * methods on the surf3d object, so we have to emulate them too.
- */
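
The layout documented above is easiest to check against a concrete value. The following stand-alone decoder is an editorial sketch, not code from this patch (the function name and sample value are made up); it prints the NV04 word-0/1/2 fields exactly as listed in the comment:

	#include <stdint.h>
	#include <stdio.h>

	static void nv04_grobj_decode(const uint32_t w[3])
	{
		printf("class        : 0x%02x\n", w[0] & 0xffu);        /* word 0, bits 0-7 */
		printf("color key    : %u\n", (w[0] >> 12) & 1u);
		printf("clip rect    : %u\n", (w[0] >> 13) & 1u);
		printf("swizzled dst : %u\n", (w[0] >> 14) & 1u);
		printf("2d operation : %u\n", (w[0] >> 15) & 7u);       /* aka patch config */
		printf("patch valid  : %u\n", (w[0] >> 24) & 1u);
		printf("surf3d valid : %u\n", (w[0] >> 25) & 1u);
		printf("mono format  : %u\n", w[1] & 3u);               /* word 1, bits 0-1 */
		printf("color format : %u\n", (w[1] >> 8) & 0x3fu);     /* word 1, bits 8-13 */
		printf("DMA_NOTIFY   : 0x%04x\n", w[1] >> 16);
		printf("DMA_A        : 0x%04x\n", w[2] & 0xffffu);
		printf("DMA_B        : 0x%04x\n", w[2] >> 16);
	}

	int main(void)
	{
		/* e.g. class 0x5e (nv04 rect), operation 1 (ROP_AND), patch valid set */
		const uint32_t example[3] = { 0x0100805e, 0x12340000, 0x00000000 };
		nv04_grobj_decode(example);
		return 0;
	}
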
-
-static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
-{
-	struct drm_device *dev = chan->dev;
-	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
-	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
-	u32 tmp;
-
-	tmp  = nv_ri32(dev, instance);
-	tmp &= ~mask;
-	tmp |= value;
-
-	nv_wi32(dev, instance, tmp);
-	nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
-	nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
-}
-
-static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
-{
-	struct drm_device *dev = chan->dev;
-	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
-	u32 tmp, ctx1;
-	int class, op, valid = 1;
-
-	ctx1 = nv_ri32(dev, instance);
-	class = ctx1 & 0xff;
-	op = (ctx1 >> 15) & 7;
-	tmp  = nv_ri32(dev, instance + 0xc);
-	tmp &= ~mask;
-	tmp |= value;
-	nv_wi32(dev, instance + 0xc, tmp);
-
-	/* check for valid surf2d/surf_dst/surf_color */
-	if (!(tmp & 0x02000000))
-		valid = 0;
-	/* check for valid surf_src/surf_zeta */
-	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
-		valid = 0;
-
-	switch (op) {
-	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
-	case 0:
-	case 3:
-		break;
-	/* ROP_AND: requires pattern and rop */
-	case 1:
-		if (!(tmp & 0x18000000))
-			valid = 0;
-		break;
-	/* BLEND_AND: requires beta1 */
-	case 2:
-		if (!(tmp & 0x20000000))
-			valid = 0;
-		break;
-	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
-	case 4:
-	case 5:
-		if (!(tmp & 0x40000000))
-			valid = 0;
-		break;
-	}
-
-	nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
-}
-
-static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
-			      u32 class, u32 mthd, u32 data)
-{
-	if (data > 5)
-		return 1;
-	/* Old versions of the objects only accept first three operations. */
-	if (data > 2 && class < 0x40)
-		return 1;
-	nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
-	/* changing operation changes set of objects needed for validation */
-	nv04_graph_set_ctx_val(chan, 0, 0);
-	return 0;
-}
-
-static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
-			      u32 class, u32 mthd, u32 data)
-{
-	uint32_t min = data & 0xffff, max;
-	uint32_t w = data >> 16;
-	if (min & 0x8000)
-		/* too large */
-		return 1;
-	if (w & 0x8000)
-		/* yes, it accepts negative for some reason. */
-		w |= 0xffff0000;
-	max = min + w;
-	max &= 0x3ffff;
-	nv_wr32(chan->dev, 0x40053c, min);
-	nv_wr32(chan->dev, 0x400544, max);
-	return 0;
-}
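
The width handling above is subtle: bit 15 of the upper half makes the width negative. A minimal sketch of the same arithmetic with the register writes dropped (hypothetical helper, not from this patch):

	#include <stdint.h>

	/* Same math as nv04_graph_mthd_surf3d_clip_h/_v above, returning the
	 * values instead of poking 0x40053c/0x400544. */
	int surf3d_clip_decode(uint32_t data, uint32_t *min, uint32_t *max)
	{
		uint32_t lo = data & 0xffff;
		uint32_t w  = data >> 16;

		if (lo & 0x8000)        /* origin too large */
			return 1;
		if (w & 0x8000)         /* width is accepted as negative... */
			w |= 0xffff0000;        /* ...so sign-extend it */

		*min = lo;
		*max = (lo + w) & 0x3ffff;      /* mirror the 0x3ffff mask above */
		return 0;
	}

	/*
	 * Examples:
	 *   data = 0x00400100  ->  min = 0x0100, max = 0x0140
	 *   data = 0xfff00100  ->  w sign-extends to -0x10, max = 0x00f0
	 */
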
-
-static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
-			      u32 class, u32 mthd, u32 data)
-{
-	uint32_t min = data & 0xffff, max;
-	uint32_t w = data >> 16;
-	if (min & 0x8000)
-		/* too large */
-		return 1;
-	if (w & 0x8000)
-		/* yes, it accepts negative for some reason. */
-		w |= 0xffff0000;
-	max = min + w;
-	max &= 0x3ffff;
-	nv_wr32(chan->dev, 0x400540, min);
-	nv_wr32(chan->dev, 0x400548, max);
-	return 0;
-}
-
-static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
-			    u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx1(chan, 0x00004000, 0);
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0);
-		return 0;
-	case 0x42:
-		nv04_graph_set_ctx1(chan, 0x00004000, 0);
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
-				    u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx1(chan, 0x00004000, 0);
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0);
-		return 0;
-	case 0x42:
-		nv04_graph_set_ctx1(chan, 0x00004000, 0);
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
-		return 0;
-	case 0x52:
-		nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
-			       u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x08000000, 0);
-		return 0;
-	case 0x18:
-		nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
-			       u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x08000000, 0);
-		return 0;
-	case 0x44:
-		nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
-			 u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x10000000, 0);
-		return 0;
-	case 0x43:
-		nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
-			   u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x20000000, 0);
-		return 0;
-	case 0x12:
-		nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
-			   u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x40000000, 0);
-		return 0;
-	case 0x72:
-		nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
-			      u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0);
-		return 0;
-	case 0x58:
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
-			      u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x04000000, 0);
-		return 0;
-	case 0x59:
-		nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
-				u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0);
-		return 0;
-	case 0x5a:
-		nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
-			       u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx_val(chan, 0x04000000, 0);
-		return 0;
-	case 0x5b:
-		nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
-			  u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx1(chan, 0x2000, 0);
-		return 0;
-	case 0x19:
-		nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
-		return 0;
-	}
-	return 1;
-}
-
-static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
-			    u32 class, u32 mthd, u32 data)
-{
-	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
-	case 0x30:
-		nv04_graph_set_ctx1(chan, 0x1000, 0);
-		return 0;
-	/* Yes, for some reason even the old versions of objects
-	 * accept 0x57 and not 0x17. Consistency be damned.
-	 */
-	case 0x57:
-		nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
-		return 0;
-	}
-	return 1;
-}
-
-static struct nouveau_bitfield nv04_graph_intr[] = {
-	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
-	{}
-};
-
-static struct nouveau_bitfield nv04_graph_nstatus[] = {
-	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
-	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
-	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
-	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
-	{}
-};
-
-struct nouveau_bitfield nv04_graph_nsource[] = {
-	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
-	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
-	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
-	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
-	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
-	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
-	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
-	{}
-};
-
-static void
-nv04_graph_context_switch(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
-	int chid;
-
-	nouveau_wait_for_idle(dev);
-
-	/* If previous context is valid, we need to save it */
-	nv04_graph_unload_context(dev);
-
-	/* Load context for next channel */
-	chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
-			    NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
-	chan = dev_priv->channels.ptr[chid];
-	if (chan)
-		nv04_graph_load_context(chan);
-}
-
-static void
-nv04_graph_isr(struct drm_device *dev)
-{
-	u32 stat;
-
-	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-		u32 chid = (addr & 0x0f000000) >> 24;
-		u32 subc = (addr & 0x0000e000) >> 13;
-		u32 mthd = (addr & 0x00001ffc);
-		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
-		u32 show = stat;
-
-		if (stat & NV_PGRAPH_INTR_NOTIFY) {
-			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
-					show &= ~NV_PGRAPH_INTR_NOTIFY;
-			}
-		}
-
-		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			nv04_graph_context_switch(dev);
-		}
-
-		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
-		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
-		if (show && nouveau_ratelimit()) {
-			NV_INFO(dev, "PGRAPH -");
-			nouveau_bitfield_print(nv04_graph_intr, show);
-			printk(" nsource:");
-			nouveau_bitfield_print(nv04_graph_nsource, nsource);
-			printk(" nstatus:");
-			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
-				     "mthd 0x%04x data 0x%08x\n",
-				chid, subc, class, mthd, data);
-		}
-	}
-}
-
-static void
-nv04_graph_destroy(struct drm_device *dev, int engine)
-{
-	struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 12);
-
-	NVOBJ_ENGINE_DEL(dev, GR);
-	kfree(pgraph);
-}
-
-int
-nv04_graph_create(struct drm_device *dev)
-{
-	struct nv04_graph_engine *pgraph;
-
-	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
-	if (!pgraph)
-		return -ENOMEM;
-
-	pgraph->base.destroy = nv04_graph_destroy;
-	pgraph->base.init = nv04_graph_init;
-	pgraph->base.fini = nv04_graph_fini;
-	pgraph->base.context_new = nv04_graph_context_new;
-	pgraph->base.context_del = nv04_graph_context_del;
-	pgraph->base.object_new = nv04_graph_object_new;
-
-	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
-	nouveau_irq_register(dev, 12, nv04_graph_isr);
-
-	/* dvd subpicture */
-	NVOBJ_CLASS(dev, 0x0038, GR);
-
-	/* m2mf */
-	NVOBJ_CLASS(dev, 0x0039, GR);
-
-	/* nv03 gdirect */
-	NVOBJ_CLASS(dev, 0x004b, GR);
-	NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 gdirect */
-	NVOBJ_CLASS(dev, 0x004a, GR);
-	NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv01 imageblit */
-	NVOBJ_CLASS(dev, 0x001f, GR);
-	NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
-	NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 imageblit */
-	NVOBJ_CLASS(dev, 0x005f, GR);
-	NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 iifc */
-	NVOBJ_CLASS(dev, 0x0060, GR);
-	NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
-	NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
-
-	/* nv05 iifc */
-	NVOBJ_CLASS(dev, 0x0064, GR);
-
-	/* nv01 ifc */
-	NVOBJ_CLASS(dev, 0x0021, GR);
-	NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 ifc */
-	NVOBJ_CLASS(dev, 0x0061, GR);
-	NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv05 ifc */
-	NVOBJ_CLASS(dev, 0x0065, GR);
-
-	/* nv03 sifc */
-	NVOBJ_CLASS(dev, 0x0036, GR);
-	NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 sifc */
-	NVOBJ_CLASS(dev, 0x0076, GR);
-	NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
-	NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv05 sifc */
-	NVOBJ_CLASS(dev, 0x0066, GR);
-
-	/* nv03 sifm */
-	NVOBJ_CLASS(dev, 0x0037, GR);
-	NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
-
-	/* nv04 sifm */
-	NVOBJ_CLASS(dev, 0x0077, GR);
-	NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
-	NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
-
-	/* null */
-	NVOBJ_CLASS(dev, 0x0030, GR);
-
-	/* surf2d */
-	NVOBJ_CLASS(dev, 0x0042, GR);
-
-	/* rop */
-	NVOBJ_CLASS(dev, 0x0043, GR);
-
-	/* beta1 */
-	NVOBJ_CLASS(dev, 0x0012, GR);
-
-	/* beta4 */
-	NVOBJ_CLASS(dev, 0x0072, GR);
-
-	/* cliprect */
-	NVOBJ_CLASS(dev, 0x0019, GR);
-
-	/* nv01 pattern */
-	NVOBJ_CLASS(dev, 0x0018, GR);
-
-	/* nv04 pattern */
-	NVOBJ_CLASS(dev, 0x0044, GR);
-
-	/* swzsurf */
-	NVOBJ_CLASS(dev, 0x0052, GR);
-
-	/* surf3d */
-	NVOBJ_CLASS(dev, 0x0053, GR);
-	NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
-	NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
-
-	/* nv03 tex_tri */
-	NVOBJ_CLASS(dev, 0x0048, GR);
-	NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
-	NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
-
-	/* tex_tri */
-	NVOBJ_CLASS(dev, 0x0054, GR);
-
-	/* multitex_tri */
-	NVOBJ_CLASS(dev, 0x0055, GR);
-
-	/* nv01 chroma */
-	NVOBJ_CLASS(dev, 0x0017, GR);
-
-	/* nv04 chroma */
-	NVOBJ_CLASS(dev, 0x0057, GR);
-
-	/* surf_dst */
-	NVOBJ_CLASS(dev, 0x0058, GR);
-
-	/* surf_src */
-	NVOBJ_CLASS(dev, 0x0059, GR);
-
-	/* surf_color */
-	NVOBJ_CLASS(dev, 0x005a, GR);
-
-	/* surf_zeta */
-	NVOBJ_CLASS(dev, 0x005b, GR);
-
-	/* nv01 line */
-	NVOBJ_CLASS(dev, 0x001c, GR);
-	NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 line */
-	NVOBJ_CLASS(dev, 0x005c, GR);
-	NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv01 tri */
-	NVOBJ_CLASS(dev, 0x001d, GR);
-	NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 tri */
-	NVOBJ_CLASS(dev, 0x005d, GR);
-	NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv01 rect */
-	NVOBJ_CLASS(dev, 0x001e, GR);
-	NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
-	NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
-	NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
-
-	/* nv04 rect */
-	NVOBJ_CLASS(dev, 0x005e, GR);
-	NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
-	NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
-	NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
-	NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
-	NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
-	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
-	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
deleted file mode 100644
index a9e380040fea..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ /dev/null
@@ -1,192 +0,0 @@
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-
-/* returns the size of fifo context */
-static int
-nouveau_fifo_ctx_size(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->chipset >= 0x40)
-		return 128 * 32;
-	else
-	if (dev_priv->chipset >= 0x17)
-		return 64 * 32;
-	else
-	if (dev_priv->chipset >= 0x10)
-		return 32 * 32;
-
-	return 32 * 16;
-}
-
-int nv04_instmem_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramht = NULL;
-	u32 offset, length;
-	int ret;
-
-	/* RAMIN always available */
-	dev_priv->ramin_available = true;
-
-	/* Reserve space at end of VRAM for PRAMIN */
-	if (dev_priv->card_type >= NV_40) {
-		u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
-		u32 rsvd;
-
-		/* estimate grctx size, the magics come from nv40_grctx.c */
-		if      (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
-		else if (dev_priv->chipset  < 0x43) rsvd = 0x4f00 * vs;
-		else if (nv44_graph_class(dev))	    rsvd = 0x4980 * vs;
-		else				    rsvd = 0x4a40 * vs;
-		rsvd += 16 * 1024;
-		rsvd *= 32; /* per-channel */
-
-		rsvd += 512 * 1024; /* pci(e)gart table */
-		rsvd += 512 * 1024; /* object storage */
-
-		dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
-	} else {
-		dev_priv->ramin_rsvd_vram = 512 * 1024;
-	}
-
-	/* Setup shared RAMHT */
-	ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
-				      NVOBJ_FLAG_ZERO_ALLOC, &ramht);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
-	nouveau_gpuobj_ref(NULL, &ramht);
-	if (ret)
-		return ret;
-
-	/* And RAMRO */
-	ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
-				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
-	if (ret)
-		return ret;
-
-	/* And RAMFC */
-	length = nouveau_fifo_ctx_size(dev);
-	switch (dev_priv->card_type) {
-	case NV_40:
-		offset = 0x20000;
-		break;
-	default:
-		offset = 0x11400;
-		break;
-	}
-
-	ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
-				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
-	if (ret)
-		return ret;
-
-	/* Only allow space after RAMFC to be used for object allocation */
-	offset += length;
-
-	/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
-	 * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
-	 * ("new style" control) the upper 16-bits of 0x2220 points at this
-	 * other mysterious table that's clobbering important things.
-	 *
-	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
-	 * smashed to pieces on us, so reserve 0x30000-0x40000 too..
-	 */
-	if (dev_priv->card_type >= NV_40) {
-		if (offset < 0x40000)
-			offset = 0x40000;
-	}
-
-	ret = drm_mm_init(&dev_priv->ramin_heap, offset,
-			  dev_priv->ramin_rsvd_vram - offset);
-	if (ret) {
-		NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
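
For a feel of what the reservation estimate above amounts to, here is a stand-alone restatement (hypothetical helper, not from this patch; vs is the bit count the driver derives from register 0x001540, nv44_class stands in for the nv44_graph_class() check, and the constants are the same "magics from nv40_grctx.c"):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t nv40_ramin_rsvd(int chipset, uint32_t vs, int nv44_class)
	{
		uint32_t rsvd;

		if      (chipset == 0x40) rsvd = 0x6aa0 * vs;
		else if (chipset  < 0x43) rsvd = 0x4f00 * vs;
		else if (nv44_class)      rsvd = 0x4980 * vs;
		else                      rsvd = 0x4a40 * vs;
		rsvd += 16 * 1024;
		rsvd *= 32;                     /* per-channel copies */
		rsvd += 512 * 1024;             /* pci(e)gart table */
		rsvd += 512 * 1024;             /* object storage */
		return (rsvd + 4095) & ~4095u;  /* round_up(rsvd, 4096) */
	}

	int main(void)
	{
		/* e.g. chipset 0x40 with vs = 2: 0x32b000 bytes, about 3.2 MiB */
		printf("0x%x bytes\n", nv40_ramin_rsvd(0x40, 2, 0));
		return 0;
	}
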
-
-void
-nv04_instmem_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
-	nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
-	nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
-
-	if (drm_mm_initialized(&dev_priv->ramin_heap))
-		drm_mm_takedown(&dev_priv->ramin_heap);
-}
-
-int
-nv04_instmem_suspend(struct drm_device *dev)
-{
-	return 0;
-}
-
-void
-nv04_instmem_resume(struct drm_device *dev)
-{
-}
-
-int
-nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
-		 u32 size, u32 align)
-{
-	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
-	struct drm_mm_node *ramin = NULL;
-
-	do {
-		if (drm_mm_pre_get(&dev_priv->ramin_heap))
-			return -ENOMEM;
-
-		spin_lock(&dev_priv->ramin_lock);
-		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
-		if (ramin == NULL) {
-			spin_unlock(&dev_priv->ramin_lock);
-			return -ENOMEM;
-		}
-
-		ramin = drm_mm_get_block_atomic(ramin, size, align);
-		spin_unlock(&dev_priv->ramin_lock);
-	} while (ramin == NULL);
-
-	gpuobj->node  = ramin;
-	gpuobj->vinst = ramin->start;
-	return 0;
-}
-
-void
-nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
-
-	spin_lock(&dev_priv->ramin_lock);
-	drm_mm_put_block(gpuobj->node);
-	gpuobj->node = NULL;
-	spin_unlock(&dev_priv->ramin_lock);
-}
-
-int
-nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
-{
-	gpuobj->pinst = gpuobj->vinst;
-	return 0;
-}
-
-void
-nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
-{
-}
-
-void
-nv04_instmem_flush(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
deleted file mode 100644
index 83751e7a3309..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_mc.c
+++ /dev/null
@@ -1,23 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-int
-nv04_mc_init(struct drm_device *dev)
-{
-	/* Power up everything, resetting each individual unit will
-	 * be done later if needed.
-	 */
-
-	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
-
-	/* Disable PROM access. */
-	nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
-
-	return 0;
-}
-
-void
-nv04_mc_takedown(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 435b5a832da3..2a0cc9d0614a 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -23,10 +23,15 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_hw.h"
 #include "nouveau_pm.h"
 
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
 int
 nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
@@ -46,7 +51,7 @@ nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 }
 
 struct nv04_pm_clock {
-	struct pll_lims pll;
+	struct nvbios_pll pll;
 	struct nouveau_pll_vals calc;
 };
 
@@ -58,13 +63,16 @@ struct nv04_pm_state {
 static int
 calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
 	int ret;
 
-	ret = get_pll_limits(dev, id, &clk->pll);
+	ret = nvbios_pll_parse(bios, id, &clk->pll);
 	if (ret)
 		return ret;
 
-	ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
+	ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
 	if (!ret)
 		return -EINVAL;
 
@@ -100,37 +108,38 @@ error:
 static void
 prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_clock *pclk = nouveau_clock(device);
 	u32 reg = clk->pll.reg;
 
 	/* thank the insane nouveau_hw_setpll() interface for this */
-	if (dev_priv->card_type >= NV_40)
+	if (device->card_type >= NV_40)
 		reg += 4;
 
-	nouveau_hw_setpll(dev, reg, &clk->calc);
+	pclk->pll_prog(pclk, reg, &clk->calc);
 }
 
 int
 nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
 	struct nv04_pm_state *state = pre_state;
 
 	prog_pll(dev, &state->core);
 
 	if (state->memory.pll.reg) {
 		prog_pll(dev, &state->memory);
-		if (dev_priv->card_type < NV_30) {
-			if (dev_priv->card_type == NV_20)
-				nv_mask(dev, 0x1002c4, 0, 1 << 20);
+		if (device->card_type < NV_30) {
+			if (device->card_type == NV_20)
+				nv_mask(device, 0x1002c4, 0, 1 << 20);
 
 			/* Reset the DLLs */
-			nv_mask(dev, 0x1002c0, 0, 1 << 8);
+			nv_mask(device, 0x1002c0, 0, 1 << 8);
 		}
 	}
 
-	ptimer->init(dev);
+	nv_ofuncs(ptimer)->init(nv_object(ptimer));
 
 	kfree(state);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
deleted file mode 100644
index 02509e715693..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_software.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
-#include "nouveau_hw.h"
-
-struct nv04_software_priv {
-	struct nouveau_software_priv base;
-};
-
-struct nv04_software_chan {
-	struct nouveau_software_chan base;
-};
-
-static int
-mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-
-	struct nouveau_page_flip_state state;
-
-	if (!nouveau_finish_page_flip(chan, &state)) {
-		nv_set_crtc_base(chan->dev, state.crtc, state.offset +
-				 state.y * state.pitch +
-				 state.x * state.bpp / 8);
-	}
-
-	return 0;
-}
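
The new scanout base computed above is just offset + y * pitch + x * bytes-per-pixel; a tiny stand-alone check with made-up numbers (editorial sketch, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t flip_base(uint32_t offset, uint32_t x, uint32_t y,
				  uint32_t pitch, uint32_t bpp)
	{
		return offset + y * pitch + x * bpp / 8;
	}

	int main(void)
	{
		/* e.g. a 32 bpp surface with a 4096-byte pitch, flipped to (8, 2) */
		printf("0x%x\n", flip_base(0x100000, 8, 2, 4096, 32));  /* 0x102020 */
		return 0;
	}
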
-
-static int
-nv04_software_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nv04_software_chan *pch;
-
-	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
-	if (!pch)
-		return -ENOMEM;
-
-	nouveau_software_context_new(&pch->base);
-	chan->engctx[engine] = pch;
-	return 0;
-}
-
-static void
-nv04_software_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv04_software_chan *pch = chan->engctx[engine];
-	chan->engctx[engine] = NULL;
-	kfree(pch);
-}
-
-static int
-nv04_software_object_new(struct nouveau_channel *chan, int engine,
-			 u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 0;
-	obj->class  = class;
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static int
-nv04_software_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
-static int
-nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static void
-nv04_software_destroy(struct drm_device *dev, int engine)
-{
-	struct nv04_software_priv *psw = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, SW);
-	kfree(psw);
-}
-
-int
-nv04_software_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_software_priv *psw;
-
-	psw = kzalloc(sizeof(*psw), GFP_KERNEL);
-	if (!psw)
-		return -ENOMEM;
-
-	psw->base.base.destroy = nv04_software_destroy;
-	psw->base.base.init = nv04_software_init;
-	psw->base.base.fini = nv04_software_fini;
-	psw->base.base.context_new = nv04_software_context_new;
-	psw->base.base.context_del = nv04_software_context_del;
-	psw->base.base.object_new = nv04_software_object_new;
-	nouveau_software_create(&psw->base);
-
-	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
-	if (dev_priv->card_type <= NV_04) {
-		NVOBJ_CLASS(dev, 0x006e, SW);
-		NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
-		NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
-	} else {
-		NVOBJ_CLASS(dev, 0x016e, SW);
-		NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
deleted file mode 100644
index 71ad319affcb..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ /dev/null
@@ -1,83 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nouveau_hw.h"
-
-int
-nv04_timer_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 m, n, d;
-
-	nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
-
-	/* aim for 31.25MHz, which gives us nanosecond timestamps */
-	d = 1000000 / 32;
-
-	/* determine base clock for timer source */
-	if (dev_priv->chipset < 0x40) {
-		n = nouveau_hw_get_clock(dev, PLL_CORE);
-	} else
-	if (dev_priv->chipset == 0x40) {
-		/*XXX: figure this out */
-		n = 0;
-	} else {
-		n = dev_priv->crystal;
-		m = 1;
-		while (n < (d * 2)) {
-			n += (n / m);
-			m++;
-		}
-
-		nv_wr32(dev, 0x009220, m - 1);
-	}
-
-	if (!n) {
-		NV_WARN(dev, "PTIMER: unknown input clock freq\n");
-		if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
-		    !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
-			nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
-			nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
-		}
-		return 0;
-	}
-
-	/* reduce ratio to acceptable values */
-	while (((n % 5) == 0) && ((d % 5) == 0)) {
-		n /= 5;
-		d /= 5;
-	}
-
-	while (((n % 2) == 0) && ((d % 2) == 0)) {
-		n /= 2;
-		d /= 2;
-	}
-
-	while (n > 0xffff || d > 0xffff) {
-		n >>= 1;
-		d >>= 1;
-	}
-
-	nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
-	nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
-	return 0;
-}
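
A worked example of the ratio reduction above, assuming the source clock is reported in kHz (consistent with d = 1000000 / 32 targeting 31.25 MHz). This is a stand-alone copy of the three loops, not code from this patch:

	#include <stdint.h>
	#include <stdio.h>

	static void reduce_timer_ratio(uint32_t *n, uint32_t *d)
	{
		while (((*n % 5) == 0) && ((*d % 5) == 0)) { *n /= 5; *d /= 5; }
		while (((*n % 2) == 0) && ((*d % 2) == 0)) { *n /= 2; *d /= 2; }
		while (*n > 0xffff || *d > 0xffff)         { *n >>= 1; *d >>= 1; }
	}

	int main(void)
	{
		/* e.g. a 27 MHz source: 27000/31250 reduces to 108/125, both of
		 * which fit the 16-bit NUMERATOR/DENOMINATOR registers while
		 * preserving the 31.25 MHz target noted above. */
		uint32_t n = 27000, d = 1000000 / 32;
		reduce_timer_ratio(&n, &d);
		printf("NUMERATOR=%u DENOMINATOR=%u\n", n, d);
		return 0;
	}
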
-
-u64
-nv04_timer_read(struct drm_device *dev)
-{
-	u32 hi, lo;
-
-	do {
-		hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
-		lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
-	} while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
-
-	return ((u64)hi << 32 | lo);
-}
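
The hi/lo/hi loop above is the usual lock-free way to read a 64-bit counter exposed as two 32-bit registers; in generic form (hypothetical reader callbacks, not from this patch):

	#include <stdint.h>

	/* Re-read the high word until it is stable: this guarantees the low
	 * word belongs to the same 64-bit value even if the counter carried
	 * between the two reads. */
	uint64_t read_split_counter(uint32_t (*rd_hi)(void), uint32_t (*rd_lo)(void))
	{
		uint32_t hi, lo;

		do {
			hi = rd_hi();
			lo = rd_lo();
		} while (hi != rd_hi());

		return ((uint64_t)hi << 32) | lo;
	}
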
-
-void
-nv04_timer_takedown(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 7157d403ed59..099fbeda6e2e 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -25,7 +25,8 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
@@ -34,6 +35,8 @@
 
 #include <drm/i2c/ch7006.h>
 
+#include <subdev/i2c.h>
+
 static struct i2c_board_info nv04_tv_encoder_info[] = {
 	{
 		I2C_BOARD_INFO("ch7006", 0x75),
@@ -49,8 +52,11 @@ static struct i2c_board_info nv04_tv_encoder_info[] = {
 
 int nv04_tv_identify(struct drm_device *dev, int i2c_index)
 {
-	return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info,
-				    NULL, i2c_index);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+
+	return i2c->identify(i2c, i2c_index, "TV encoder",
+			     nv04_tv_encoder_info, NULL);
 }
 
 
@@ -64,12 +70,12 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
 static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_mode_state *state = &dev_priv->mode_reg;
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
 	uint8_t crtc1A;
 
-	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+	NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
 		mode, nv_encoder->dcb->index);
 
 	state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
@@ -94,8 +100,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
 
 static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];
+	struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head];
 
 	state->tv_setup = 0;
 
@@ -133,9 +138,8 @@ static void nv04_tv_mode_set(struct drm_encoder *encoder,
 			     struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
 
 	regp->tv_htotal = adjusted_mode->htotal;
 	regp->tv_vtotal = adjusted_mode->vtotal;
@@ -157,12 +161,13 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
 
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
-	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+	NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
 		      drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
 		      '@' + ffs(nv_encoder->dcb->or));
 }
@@ -181,15 +186,16 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
 };
 
 int
-nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
+nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 {
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	struct drm_device *dev = connector->dev;
 	struct drm_encoder_helper_funcs *hfuncs;
 	struct drm_encoder_slave_funcs *sfuncs;
-	struct nouveau_i2c_chan *i2c =
-		nouveau_i2c_find(dev, entry->i2c_index);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
 	int type, ret;
 
 	/* Ensure that we can talk to this encoder */
@@ -221,7 +227,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
 
 	/* Run the slave-specific initialization */
 	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-				   &i2c->adapter, &nv04_tv_encoder_info[type]);
+				   &port->adapter, &nv04_tv_encoder_info[type]);
 	if (ret < 0)
 		goto fail_cleanup;
 
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
deleted file mode 100644
index 510e90f34482..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ /dev/null
@@ -1,103 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-void
-nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
-			 uint32_t size, uint32_t pitch, uint32_t flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	tile->addr  = 0x80000000 | addr;
-	tile->limit = max(1u, addr + size) - 1;
-	tile->pitch = pitch;
-}
-
-void
-nv10_fb_free_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
-}
-
-void
-nv10_fb_set_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
-	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
-	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
-}
-
-int
-nv1a_fb_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct pci_dev *bridge;
-	uint32_t mem, mib;
-
-	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
-	if (!bridge) {
-		NV_ERROR(dev, "no bridge device\n");
-		return 0;
-	}
-
-	if (dev_priv->chipset == 0x1a) {
-		pci_read_config_dword(bridge, 0x7c, &mem);
-		mib = ((mem >> 6) & 31) + 1;
-	} else {
-		pci_read_config_dword(bridge, 0x84, &mem);
-		mib = ((mem >> 4) & 127) + 1;
-	}
-
-	dev_priv->vram_size = mib * 1024 * 1024;
-	return 0;
-}
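
On these IGP chipsets the VRAM size comes from the host bridge's PCI config space rather than from PFB; the decode above reduces to a MiB count stored minus one. A stand-alone restatement (hypothetical helper, raw config dword passed in directly):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t nforce_vram_bytes(int chipset, uint32_t cfg)
	{
		uint32_t mib;

		if (chipset == 0x1a)
			mib = ((cfg >> 6) & 31) + 1;    /* dword 0x7c, bits 10:6 */
		else
			mib = ((cfg >> 4) & 127) + 1;   /* dword 0x84, bits 10:4 */

		return mib * 1024 * 1024;
	}

	int main(void)
	{
		/* e.g. a raw value with bits 10:6 = 31 decodes to 32 MiB */
		printf("%u bytes\n", nforce_vram_bytes(0x1a, 31u << 6));
		return 0;
	}
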
-
-int
-nv10_fb_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);
-	u32 cfg0 = nv_rd32(dev, 0x100200);
-
-	dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
-
-	if (cfg0 & 0x00000001)
-		dev_priv->vram_type = NV_MEM_TYPE_DDR1;
-	else
-		dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
-
-	return 0;
-}
-
-int
-nv10_fb_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	int i;
-
-	/* Turn all the tiling regions off. */
-	pfb->num_tiles = NV10_PFB_TILE__SIZE;
-	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_tile_region(dev, i);
-
-	return 0;
-}
-
-void
-nv10_fb_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	int i;
-
-	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->free_tile_region(dev, i);
-}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index d30f752464ef..ce752bf5cc4e 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,10 +22,11 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fence.h"
 
 struct nv10_fence_chan {
@@ -39,7 +40,7 @@ struct nv10_fence_priv {
 	u32 sequence;
 };
 
-static int
+int
 nv10_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
@@ -60,15 +61,15 @@ nv10_fence_sync(struct nouveau_fence *fence,
 	return -ENODEV;
 }
 
-static int
+int
 nv17_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
-	struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+	struct nv10_fence_priv *priv = chan->drm->fence;
 	u32 value;
 	int ret;
 
-	if (!mutex_trylock(&prev->mutex))
+	if (!mutex_trylock(&prev->cli->mutex))
 		return -EBUSY;
 
 	spin_lock(&priv->lock);
@@ -95,34 +96,33 @@ nv17_fence_sync(struct nouveau_fence *fence,
 		FIRE_RING (chan);
 	}
 
-	mutex_unlock(&prev->mutex);
+	mutex_unlock(&prev->cli->mutex);
 	return 0;
 }
 
-static u32
+u32
 nv10_fence_read(struct nouveau_channel *chan)
 {
-	return nvchan_rd32(chan, 0x0048);
+	return nv_ro32(chan->object, 0x0048);
 }
 
-static void
-nv10_fence_context_del(struct nouveau_channel *chan, int engine)
+void
+nv10_fence_context_del(struct nouveau_channel *chan)
 {
-	struct nv10_fence_chan *fctx = chan->engctx[engine];
+	struct nv10_fence_chan *fctx = chan->fence;
 	nouveau_fence_context_del(&fctx->base);
-	chan->engctx[engine] = NULL;
+	chan->fence = NULL;
 	kfree(fctx);
 }
 
 static int
-nv10_fence_context_new(struct nouveau_channel *chan, int engine)
+nv10_fence_context_new(struct nouveau_channel *chan)
 {
-	struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct nouveau_gpuobj *obj;
 	int ret = 0;
 
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (!fctx)
 		return -ENOMEM;
 
@@ -130,69 +130,56 @@ nv10_fence_context_new(struct nouveau_channel *chan, int engine)
 
 	if (priv->bo) {
 		struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-					     mem->start * PAGE_SIZE, mem->size,
-					     NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &obj);
-		if (!ret) {
-			ret = nouveau_ramht_insert(chan, NvSema, obj);
-			nouveau_gpuobj_ref(NULL, &obj);
-		}
+		struct nouveau_object *object;
+		u32 start = mem->start * PAGE_SIZE;
+		u32 limit = mem->start + mem->size - 1;
+
+		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+					 NvSema, 0x0002,
+					 &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = start,
+						.limit = limit,
+					 }, sizeof(struct nv_dma_class),
+					 &object);
 	}
 
 	if (ret)
-		nv10_fence_context_del(chan, engine);
+		nv10_fence_context_del(chan);
 	return ret;
 }
 
-static int
-nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
+void
+nv10_fence_destroy(struct nouveau_drm *drm)
 {
-	return 0;
-}
-
-static int
-nv10_fence_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
-static void
-nv10_fence_destroy(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv10_fence_priv *priv = nv_engine(dev, engine);
-
+	struct nv10_fence_priv *priv = drm->fence;
+	nouveau_bo_unmap(priv->bo);
 	nouveau_bo_ref(NULL, &priv->bo);
-	dev_priv->eng[engine] = NULL;
+	drm->fence = NULL;
 	kfree(priv);
 }
 
 int
-nv10_fence_create(struct drm_device *dev)
+nv10_fence_create(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv10_fence_priv *priv;
 	int ret = 0;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	priv->base.engine.destroy = nv10_fence_destroy;
-	priv->base.engine.init = nv10_fence_init;
-	priv->base.engine.fini = nv10_fence_fini;
-	priv->base.engine.context_new = nv10_fence_context_new;
-	priv->base.engine.context_del = nv10_fence_context_del;
+	priv->base.dtor = nv10_fence_destroy;
+	priv->base.context_new = nv10_fence_context_new;
+	priv->base.context_del = nv10_fence_context_del;
 	priv->base.emit = nv10_fence_emit;
 	priv->base.read = nv10_fence_read;
 	priv->base.sync = nv10_fence_sync;
-	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
 	spin_lock_init(&priv->lock);
 
-	if (dev_priv->chipset >= 0x17) {
-		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	if (nv_device(drm->device)->chipset >= 0x17) {
+		ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 				     0, 0x0000, NULL, &priv->bo);
 		if (!ret) {
 			ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -209,6 +196,6 @@ nv10_fence_create(struct drm_device *dev)
 	}
 
 	if (ret)
-		nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+		nv10_fence_destroy(drm);
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
deleted file mode 100644
index 05a2499b7a4d..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-
-static struct ramfc_desc {
-	unsigned bits:6;
-	unsigned ctxs:5;
-	unsigned ctxp:8;
-	unsigned regs:5;
-	unsigned regp;
-} nv10_ramfc[] = {
-	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
-	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
-	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
-	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
-	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
-	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
-	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
-	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
-	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
-	{}
-};
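
The consumer of this table lives in the shared nv04 fifo code and is not part of this hunk, so the field meanings are not spelled out here. A plausible reading, shown as a hypothetical save loop only: a field `bits` wide sits at bit `ctxs` of RAMFC byte `ctxp` and mirrors bit `regs` of register `regp`.

	#include <stdint.h>

	struct ramfc_desc_example {
		unsigned bits:6;        /* field width */
		unsigned ctxs:5;        /* bit position inside the RAMFC word */
		unsigned ctxp:8;        /* byte offset inside RAMFC */
		unsigned regs:5;        /* bit position inside the register */
		unsigned regp;          /* PFIFO register address */
	};

	/* Illustration only: walk the table, copying each register field into
	 * its RAMFC slot, stopping at the {} sentinel (bits == 0). */
	void ramfc_save_example(const struct ramfc_desc_example *c,
				uint32_t (*rd32)(unsigned reg),
				uint32_t (*ctx_rd32)(unsigned off),
				void (*ctx_wr32)(unsigned off, uint32_t val))
	{
		for (; c->bits; c++) {
			uint32_t mask = (uint32_t)((1ULL << c->bits) - 1);
			uint32_t fld  = (rd32(c->regp) >> c->regs) & mask;
			uint32_t word = ctx_rd32(c->ctxp) & ~(mask << c->ctxs);

			ctx_wr32(c->ctxp, word | (fld << c->ctxs));
		}
	}
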
-
-struct nv10_fifo_priv {
-	struct nouveau_fifo_priv base;
-	struct ramfc_desc *ramfc_desc;
-};
-
-struct nv10_fifo_chan {
-	struct nouveau_fifo_chan base;
-	struct nouveau_gpuobj *ramfc;
-};
-
-static int
-nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv10_fifo_priv *priv = nv_engine(dev, engine);
-	struct nv10_fifo_chan *fctx;
-	unsigned long flags;
-	int ret;
-
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-
-	/* map channel control registers */
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			     NV03_USER(chan->id), PAGE_SIZE);
-	if (!chan->user) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	/* initialise default fifo context */
-	ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
-				      chan->id * 32, ~0, 32,
-				      NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
-	if (ret)
-		goto error;
-
-	nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
-	nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
-	nv_wo32(fctx->ramfc, 0x08, 0x00000000);
-	nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
-	nv_wo32(fctx->ramfc, 0x10, 0x00000000);
-	nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-				   NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-				   NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-				   NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nv_wo32(fctx->ramfc, 0x18, 0x00000000);
-	nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
-
-	/* enable dma mode on the channel */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
-	if (ret)
-		priv->base.base.context_del(chan, engine);
-	return ret;
-}
-
-int
-nv10_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv10_fifo_priv *priv;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.base.destroy = nv04_fifo_destroy;
-	priv->base.base.init = nv04_fifo_init;
-	priv->base.base.fini = nv04_fifo_fini;
-	priv->base.base.context_new = nv10_fifo_context_new;
-	priv->base.base.context_del = nv04_fifo_context_del;
-	priv->base.channels = 31;
-	priv->ramfc_desc = nv10_ramfc;
-	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
deleted file mode 100644
index ecc1b62dd751..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
-#include "nouveau_gpio.h"
-
-int
-nv10_gpio_sense(struct drm_device *dev, int line)
-{
-	if (line < 2) {
-		line = line * 16;
-		line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
-		return !!(line & 0x0100);
-	} else
-	if (line < 10) {
-		line = (line - 2) * 4;
-		line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
-		return !!(line & 0x04);
-	} else
-	if (line < 14) {
-		line = (line - 10) * 4;
-		line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
-		return !!(line & 0x04);
-	}
-
-	return -EINVAL;
-}
-
-int
-nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
-{
-	u32 reg, mask, data;
-
-	if (line < 2) {
-		line = line * 16;
-		reg  = NV_PCRTC_GPIO;
-		mask = 0x00000011;
-		data = (dir << 4) | out;
-	} else
-	if (line < 10) {
-		line = (line - 2) * 4;
-		reg  = NV_PCRTC_GPIO_EXT;
-		mask = 0x00000003;
-		data = (dir << 1) | out;
-	} else
-	if (line < 14) {
-		line = (line - 10) * 4;
-		reg  = NV_PCRTC_850;
-		mask = 0x00000003;
-		data = (dir << 1) | out;
-	} else {
-		return -EINVAL;
-	}
-
-	mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
-	NVWriteCRTC(dev, 0, reg, mask | (data << line));
-	return 0;
-}
-
-void
-nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
-{
-	u32 mask = 0x00010001 << line;
-
-	nv_wr32(dev, 0x001104, mask);
-	nv_mask(dev, 0x001144, mask, on ? mask : 0);
-}
-
-static void
-nv10_gpio_isr(struct drm_device *dev)
-{
-	u32 intr = nv_rd32(dev, 0x1104);
-	u32 hi = (intr & 0x0000ffff) >> 0;
-	u32 lo = (intr & 0xffff0000) >> 16;
-
-	nouveau_gpio_isr(dev, 0, hi | lo);
-
-	nv_wr32(dev, 0x001104, intr);
-}
-
-int
-nv10_gpio_init(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x001140, 0x00000000);
-	nv_wr32(dev, 0x001100, 0xffffffff);
-	nv_wr32(dev, 0x001144, 0x00000000);
-	nv_wr32(dev, 0x001104, 0xffffffff);
-	nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
-	return 0;
-}
-
-void
-nv10_gpio_fini(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x001140, 0x00000000);
-	nv_wr32(dev, 0x001144, 0x00000000);
-	nouveau_irq_unregister(dev, 28);
-}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
deleted file mode 100644
index 75dd51bbe64d..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ /dev/null
@@ -1,1188 +0,0 @@
-/*
- * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drmP.h>
-#include <drm/nouveau_drm.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-
-struct nv10_graph_engine {
-	struct nouveau_exec_engine base;
-};
-
-struct pipe_state {
-	uint32_t pipe_0x0000[0x040/4];
-	uint32_t pipe_0x0040[0x010/4];
-	uint32_t pipe_0x0200[0x0c0/4];
-	uint32_t pipe_0x4400[0x080/4];
-	uint32_t pipe_0x6400[0x3b0/4];
-	uint32_t pipe_0x6800[0x2f0/4];
-	uint32_t pipe_0x6c00[0x030/4];
-	uint32_t pipe_0x7000[0x130/4];
-	uint32_t pipe_0x7400[0x0c0/4];
-	uint32_t pipe_0x7800[0x0c0/4];
-};
-
-static int nv10_graph_ctx_regs[] = {
-	NV10_PGRAPH_CTX_SWITCH(0),
-	NV10_PGRAPH_CTX_SWITCH(1),
-	NV10_PGRAPH_CTX_SWITCH(2),
-	NV10_PGRAPH_CTX_SWITCH(3),
-	NV10_PGRAPH_CTX_SWITCH(4),
-	NV10_PGRAPH_CTX_CACHE(0, 0),
-	NV10_PGRAPH_CTX_CACHE(0, 1),
-	NV10_PGRAPH_CTX_CACHE(0, 2),
-	NV10_PGRAPH_CTX_CACHE(0, 3),
-	NV10_PGRAPH_CTX_CACHE(0, 4),
-	NV10_PGRAPH_CTX_CACHE(1, 0),
-	NV10_PGRAPH_CTX_CACHE(1, 1),
-	NV10_PGRAPH_CTX_CACHE(1, 2),
-	NV10_PGRAPH_CTX_CACHE(1, 3),
-	NV10_PGRAPH_CTX_CACHE(1, 4),
-	NV10_PGRAPH_CTX_CACHE(2, 0),
-	NV10_PGRAPH_CTX_CACHE(2, 1),
-	NV10_PGRAPH_CTX_CACHE(2, 2),
-	NV10_PGRAPH_CTX_CACHE(2, 3),
-	NV10_PGRAPH_CTX_CACHE(2, 4),
-	NV10_PGRAPH_CTX_CACHE(3, 0),
-	NV10_PGRAPH_CTX_CACHE(3, 1),
-	NV10_PGRAPH_CTX_CACHE(3, 2),
-	NV10_PGRAPH_CTX_CACHE(3, 3),
-	NV10_PGRAPH_CTX_CACHE(3, 4),
-	NV10_PGRAPH_CTX_CACHE(4, 0),
-	NV10_PGRAPH_CTX_CACHE(4, 1),
-	NV10_PGRAPH_CTX_CACHE(4, 2),
-	NV10_PGRAPH_CTX_CACHE(4, 3),
-	NV10_PGRAPH_CTX_CACHE(4, 4),
-	NV10_PGRAPH_CTX_CACHE(5, 0),
-	NV10_PGRAPH_CTX_CACHE(5, 1),
-	NV10_PGRAPH_CTX_CACHE(5, 2),
-	NV10_PGRAPH_CTX_CACHE(5, 3),
-	NV10_PGRAPH_CTX_CACHE(5, 4),
-	NV10_PGRAPH_CTX_CACHE(6, 0),
-	NV10_PGRAPH_CTX_CACHE(6, 1),
-	NV10_PGRAPH_CTX_CACHE(6, 2),
-	NV10_PGRAPH_CTX_CACHE(6, 3),
-	NV10_PGRAPH_CTX_CACHE(6, 4),
-	NV10_PGRAPH_CTX_CACHE(7, 0),
-	NV10_PGRAPH_CTX_CACHE(7, 1),
-	NV10_PGRAPH_CTX_CACHE(7, 2),
-	NV10_PGRAPH_CTX_CACHE(7, 3),
-	NV10_PGRAPH_CTX_CACHE(7, 4),
-	NV10_PGRAPH_CTX_USER,
-	NV04_PGRAPH_DMA_START_0,
-	NV04_PGRAPH_DMA_START_1,
-	NV04_PGRAPH_DMA_LENGTH,
-	NV04_PGRAPH_DMA_MISC,
-	NV10_PGRAPH_DMA_PITCH,
-	NV04_PGRAPH_BOFFSET0,
-	NV04_PGRAPH_BBASE0,
-	NV04_PGRAPH_BLIMIT0,
-	NV04_PGRAPH_BOFFSET1,
-	NV04_PGRAPH_BBASE1,
-	NV04_PGRAPH_BLIMIT1,
-	NV04_PGRAPH_BOFFSET2,
-	NV04_PGRAPH_BBASE2,
-	NV04_PGRAPH_BLIMIT2,
-	NV04_PGRAPH_BOFFSET3,
-	NV04_PGRAPH_BBASE3,
-	NV04_PGRAPH_BLIMIT3,
-	NV04_PGRAPH_BOFFSET4,
-	NV04_PGRAPH_BBASE4,
-	NV04_PGRAPH_BLIMIT4,
-	NV04_PGRAPH_BOFFSET5,
-	NV04_PGRAPH_BBASE5,
-	NV04_PGRAPH_BLIMIT5,
-	NV04_PGRAPH_BPITCH0,
-	NV04_PGRAPH_BPITCH1,
-	NV04_PGRAPH_BPITCH2,
-	NV04_PGRAPH_BPITCH3,
-	NV04_PGRAPH_BPITCH4,
-	NV10_PGRAPH_SURFACE,
-	NV10_PGRAPH_STATE,
-	NV04_PGRAPH_BSWIZZLE2,
-	NV04_PGRAPH_BSWIZZLE5,
-	NV04_PGRAPH_BPIXEL,
-	NV10_PGRAPH_NOTIFY,
-	NV04_PGRAPH_PATT_COLOR0,
-	NV04_PGRAPH_PATT_COLOR1,
-	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
-	0x00400904,
-	0x00400908,
-	0x0040090c,
-	0x00400910,
-	0x00400914,
-	0x00400918,
-	0x0040091c,
-	0x00400920,
-	0x00400924,
-	0x00400928,
-	0x0040092c,
-	0x00400930,
-	0x00400934,
-	0x00400938,
-	0x0040093c,
-	0x00400940,
-	0x00400944,
-	0x00400948,
-	0x0040094c,
-	0x00400950,
-	0x00400954,
-	0x00400958,
-	0x0040095c,
-	0x00400960,
-	0x00400964,
-	0x00400968,
-	0x0040096c,
-	0x00400970,
-	0x00400974,
-	0x00400978,
-	0x0040097c,
-	0x00400980,
-	0x00400984,
-	0x00400988,
-	0x0040098c,
-	0x00400990,
-	0x00400994,
-	0x00400998,
-	0x0040099c,
-	0x004009a0,
-	0x004009a4,
-	0x004009a8,
-	0x004009ac,
-	0x004009b0,
-	0x004009b4,
-	0x004009b8,
-	0x004009bc,
-	0x004009c0,
-	0x004009c4,
-	0x004009c8,
-	0x004009cc,
-	0x004009d0,
-	0x004009d4,
-	0x004009d8,
-	0x004009dc,
-	0x004009e0,
-	0x004009e4,
-	0x004009e8,
-	0x004009ec,
-	0x004009f0,
-	0x004009f4,
-	0x004009f8,
-	0x004009fc,
-	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
-	0x0040080c,
-	NV04_PGRAPH_PATTERN_SHAPE,
-	NV03_PGRAPH_MONO_COLOR0,
-	NV04_PGRAPH_ROP3,
-	NV04_PGRAPH_CHROMA,
-	NV04_PGRAPH_BETA_AND,
-	NV04_PGRAPH_BETA_PREMULT,
-	0x00400e70,
-	0x00400e74,
-	0x00400e78,
-	0x00400e7c,
-	0x00400e80,
-	0x00400e84,
-	0x00400e88,
-	0x00400e8c,
-	0x00400ea0,
-	0x00400ea4,
-	0x00400ea8,
-	0x00400e90,
-	0x00400e94,
-	0x00400e98,
-	0x00400e9c,
-	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
-	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
-	0x00400f04,
-	0x00400f24,
-	0x00400f08,
-	0x00400f28,
-	0x00400f0c,
-	0x00400f2c,
-	0x00400f10,
-	0x00400f30,
-	0x00400f14,
-	0x00400f34,
-	0x00400f18,
-	0x00400f38,
-	0x00400f1c,
-	0x00400f3c,
-	NV10_PGRAPH_XFMODE0,
-	NV10_PGRAPH_XFMODE1,
-	NV10_PGRAPH_GLOBALSTATE0,
-	NV10_PGRAPH_GLOBALSTATE1,
-	NV04_PGRAPH_STORED_FMT,
-	NV04_PGRAPH_SOURCE_COLOR,
-	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
-	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
-	0x00400404,
-	0x00400484,
-	0x00400408,
-	0x00400488,
-	0x0040040c,
-	0x0040048c,
-	0x00400410,
-	0x00400490,
-	0x00400414,
-	0x00400494,
-	0x00400418,
-	0x00400498,
-	0x0040041c,
-	0x0040049c,
-	0x00400420,
-	0x004004a0,
-	0x00400424,
-	0x004004a4,
-	0x00400428,
-	0x004004a8,
-	0x0040042c,
-	0x004004ac,
-	0x00400430,
-	0x004004b0,
-	0x00400434,
-	0x004004b4,
-	0x00400438,
-	0x004004b8,
-	0x0040043c,
-	0x004004bc,
-	0x00400440,
-	0x004004c0,
-	0x00400444,
-	0x004004c4,
-	0x00400448,
-	0x004004c8,
-	0x0040044c,
-	0x004004cc,
-	0x00400450,
-	0x004004d0,
-	0x00400454,
-	0x004004d4,
-	0x00400458,
-	0x004004d8,
-	0x0040045c,
-	0x004004dc,
-	0x00400460,
-	0x004004e0,
-	0x00400464,
-	0x004004e4,
-	0x00400468,
-	0x004004e8,
-	0x0040046c,
-	0x004004ec,
-	0x00400470,
-	0x004004f0,
-	0x00400474,
-	0x004004f4,
-	0x00400478,
-	0x004004f8,
-	0x0040047c,
-	0x004004fc,
-	NV03_PGRAPH_ABS_UCLIP_XMIN,
-	NV03_PGRAPH_ABS_UCLIP_XMAX,
-	NV03_PGRAPH_ABS_UCLIP_YMIN,
-	NV03_PGRAPH_ABS_UCLIP_YMAX,
-	0x00400550,
-	0x00400558,
-	0x00400554,
-	0x0040055c,
-	NV03_PGRAPH_ABS_UCLIPA_XMIN,
-	NV03_PGRAPH_ABS_UCLIPA_XMAX,
-	NV03_PGRAPH_ABS_UCLIPA_YMIN,
-	NV03_PGRAPH_ABS_UCLIPA_YMAX,
-	NV03_PGRAPH_ABS_ICLIP_XMAX,
-	NV03_PGRAPH_ABS_ICLIP_YMAX,
-	NV03_PGRAPH_XY_LOGIC_MISC0,
-	NV03_PGRAPH_XY_LOGIC_MISC1,
-	NV03_PGRAPH_XY_LOGIC_MISC2,
-	NV03_PGRAPH_XY_LOGIC_MISC3,
-	NV03_PGRAPH_CLIPX_0,
-	NV03_PGRAPH_CLIPX_1,
-	NV03_PGRAPH_CLIPY_0,
-	NV03_PGRAPH_CLIPY_1,
-	NV10_PGRAPH_COMBINER0_IN_ALPHA,
-	NV10_PGRAPH_COMBINER1_IN_ALPHA,
-	NV10_PGRAPH_COMBINER0_IN_RGB,
-	NV10_PGRAPH_COMBINER1_IN_RGB,
-	NV10_PGRAPH_COMBINER_COLOR0,
-	NV10_PGRAPH_COMBINER_COLOR1,
-	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
-	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
-	NV10_PGRAPH_COMBINER0_OUT_RGB,
-	NV10_PGRAPH_COMBINER1_OUT_RGB,
-	NV10_PGRAPH_COMBINER_FINAL0,
-	NV10_PGRAPH_COMBINER_FINAL1,
-	0x00400e00,
-	0x00400e04,
-	0x00400e08,
-	0x00400e0c,
-	0x00400e10,
-	0x00400e14,
-	0x00400e18,
-	0x00400e1c,
-	0x00400e20,
-	0x00400e24,
-	0x00400e28,
-	0x00400e2c,
-	0x00400e30,
-	0x00400e34,
-	0x00400e38,
-	0x00400e3c,
-	NV04_PGRAPH_PASSTHRU_0,
-	NV04_PGRAPH_PASSTHRU_1,
-	NV04_PGRAPH_PASSTHRU_2,
-	NV10_PGRAPH_DIMX_TEXTURE,
-	NV10_PGRAPH_WDIMX_TEXTURE,
-	NV10_PGRAPH_DVD_COLORFMT,
-	NV10_PGRAPH_SCALED_FORMAT,
-	NV04_PGRAPH_MISC24_0,
-	NV04_PGRAPH_MISC24_1,
-	NV04_PGRAPH_MISC24_2,
-	NV03_PGRAPH_X_MISC,
-	NV03_PGRAPH_Y_MISC,
-	NV04_PGRAPH_VALID1,
-	NV04_PGRAPH_VALID2,
-};
-
-static int nv17_graph_ctx_regs[] = {
-	NV10_PGRAPH_DEBUG_4,
-	0x004006b0,
-	0x00400eac,
-	0x00400eb0,
-	0x00400eb4,
-	0x00400eb8,
-	0x00400ebc,
-	0x00400ec0,
-	0x00400ec4,
-	0x00400ec8,
-	0x00400ecc,
-	0x00400ed0,
-	0x00400ed4,
-	0x00400ed8,
-	0x00400edc,
-	0x00400ee0,
-	0x00400a00,
-	0x00400a04,
-};
-
-struct graph_state {
-	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
-	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
-	struct pipe_state pipe_state;
-	uint32_t lma_window[4];
-};
-
-#define PIPE_SAVE(dev, state, addr)					\
-	do {								\
-		int __i;						\
-		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
-		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
-			state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
-	} while (0)
-
-#define PIPE_RESTORE(dev, state, addr)					\
-	do {								\
-		int __i;						\
-		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
-		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
-			nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
-	} while (0)
-
-static void nv10_graph_save_pipe(struct nouveau_channel *chan)
-{
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
-	struct drm_device *dev = chan->dev;
-
-	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
-	PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
-	PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
-	PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
-	PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
-	PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
-	PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
-	PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
-}
-
-static void nv10_graph_load_pipe(struct nouveau_channel *chan)
-{
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
-	struct drm_device *dev = chan->dev;
-	uint32_t xfmode0, xfmode1;
-	int i;
-
-	nouveau_wait_for_idle(dev);
-	/* XXX check haiku comments */
-	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
-	for (i = 0; i < 4; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-	for (i = 0; i < 4; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
-	for (i = 0; i < 3; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
-	for (i = 0; i < 3; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
-
-
-	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
-	nouveau_wait_for_idle(dev);
-
-	/* restore XFMODE */
-	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
-	PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
-	PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
-	PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
-	PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
-	PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
-	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
-	PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
-	PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
-	nouveau_wait_for_idle(dev);
-}
-
-static void nv10_graph_create_pipe(struct nouveau_channel *chan)
-{
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
-	struct drm_device *dev = chan->dev;
-	uint32_t *fifo_pipe_state_addr;
-	int i;
-#define PIPE_INIT(addr) \
-	do { \
-		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
-	} while (0)
-#define PIPE_INIT_END(addr) \
-	do { \
-		uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
-				ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
-		if (fifo_pipe_state_addr != __end_addr) \
-			NV_ERROR(dev, "incomplete pipe init for 0x%x :  %p/%p\n", \
-				addr, fifo_pipe_state_addr, __end_addr); \
-	} while (0)
-#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
-
-	PIPE_INIT(0x0200);
-	for (i = 0; i < 48; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x0200);
-
-	PIPE_INIT(0x6400);
-	for (i = 0; i < 211; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x3f800000);
-	NV_WRITE_PIPE_INIT(0x40000000);
-	NV_WRITE_PIPE_INIT(0x40000000);
-	NV_WRITE_PIPE_INIT(0x40000000);
-	NV_WRITE_PIPE_INIT(0x40000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x3f800000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x3f000000);
-	NV_WRITE_PIPE_INIT(0x3f000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x3f800000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x3f800000);
-	NV_WRITE_PIPE_INIT(0x3f800000);
-	NV_WRITE_PIPE_INIT(0x3f800000);
-	NV_WRITE_PIPE_INIT(0x3f800000);
-	PIPE_INIT_END(0x6400);
-
-	PIPE_INIT(0x6800);
-	for (i = 0; i < 162; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x3f800000);
-	for (i = 0; i < 25; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x6800);
-
-	PIPE_INIT(0x6c00);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0xbf800000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x6c00);
-
-	PIPE_INIT(0x7000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x7149f2ca);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x7149f2ca);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x7149f2ca);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x7149f2ca);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x7149f2ca);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x7149f2ca);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x7149f2ca);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x00000000);
-	NV_WRITE_PIPE_INIT(0x7149f2ca);
-	for (i = 0; i < 35; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x7000);
-
-	PIPE_INIT(0x7400);
-	for (i = 0; i < 48; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x7400);
-
-	PIPE_INIT(0x7800);
-	for (i = 0; i < 48; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x7800);
-
-	PIPE_INIT(0x4400);
-	for (i = 0; i < 32; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x4400);
-
-	PIPE_INIT(0x0000);
-	for (i = 0; i < 16; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x0000);
-
-	PIPE_INIT(0x0040);
-	for (i = 0; i < 4; i++)
-		NV_WRITE_PIPE_INIT(0x00000000);
-	PIPE_INIT_END(0x0040);
-
-#undef PIPE_INIT
-#undef PIPE_INIT_END
-#undef NV_WRITE_PIPE_INIT
-}
-
-static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
-{
-	int i;
-	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
-		if (nv10_graph_ctx_regs[i] == reg)
-			return i;
-	}
-	NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
-	return -1;
-}
-
-static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
-{
-	int i;
-	for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
-		if (nv17_graph_ctx_regs[i] == reg)
-			return i;
-	}
-	NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
-	return -1;
-}
-
-static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
-				       uint32_t inst)
-{
-	struct drm_device *dev = chan->dev;
-	uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
-	uint32_t ctx_user, ctx_switch[5];
-	int i, subchan = -1;
-
-	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
-	 * that cannot be restored via MMIO. Do it through the FIFO
-	 * instead.
-	 */
-
-	/* Look for a celsius object */
-	for (i = 0; i < 8; i++) {
-		int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
-
-		if (class == 0x56 || class == 0x96 || class == 0x99) {
-			subchan = i;
-			break;
-		}
-	}
-
-	if (subchan < 0 || !inst)
-		return;
-
-	/* Save the current ctx object */
-	ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
-	for (i = 0; i < 5; i++)
-		ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));
-
-	/* Save the FIFO state */
-	st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
-	st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
-	st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
-	fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);
-
-	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);
-
-	/* Switch to the celsius subchannel */
-	for (i = 0; i < 5; i++)
-		nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
-			nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
-	nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
-
-	/* Inject NV10TCL_DMA_VTXBUF */
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
-		0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
-	nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
-	/* Restore the FIFO state */
-	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);
-
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
-
-	/* Restore the current ctx object */
-	for (i = 0; i < 5; i++)
-		nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
-}
-
-static int
-nv10_graph_load_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	uint32_t tmp;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
-		nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
-	if (dev_priv->chipset >= 0x17) {
-		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
-			nv_wr32(dev, nv17_graph_ctx_regs[i],
-						pgraph_ctx->nv17[i]);
-	}
-
-	nv10_graph_load_pipe(chan);
-	nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
-					  & 0xffff));
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
-	tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
-	return 0;
-}
-
-static int
-nv10_graph_unload_context(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	struct graph_state *ctx;
-	uint32_t tmp;
-	int i;
-
-	chan = nv10_graph_channel(dev);
-	if (!chan)
-		return 0;
-	ctx = chan->engctx[NVOBJ_ENGINE_GR];
-
-	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
-		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
-
-	if (dev_priv->chipset >= 0x17) {
-		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
-			ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
-	}
-
-	nv10_graph_save_pipe(chan);
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
-	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= 31 << 24;
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
-	return 0;
-}
-
-static void
-nv10_graph_context_switch(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
-	int chid;
-
-	nouveau_wait_for_idle(dev);
-
-	/* If previous context is valid, we need to save it */
-	nv10_graph_unload_context(dev);
-
-	/* Load context for next channel */
-	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->channels.ptr[chid];
-	if (chan && chan->engctx[NVOBJ_ENGINE_GR])
-		nv10_graph_load_context(chan);
-}
-
-#define NV_WRITE_CTX(reg, val) do { \
-	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
-	if (offset > 0) \
-		pgraph_ctx->nv10[offset] = val; \
-	} while (0)
-
-#define NV17_WRITE_CTX(reg, val) do { \
-	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
-	if (offset > 0) \
-		pgraph_ctx->nv17[offset] = val; \
-	} while (0)
-
-struct nouveau_channel *
-nv10_graph_channel(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chid = 31;
-
-	if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
-		chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
-
-	if (chid >= 31)
-		return NULL;
-
-	return dev_priv->channels.ptr[chid];
-}
-
-static int
-nv10_graph_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct graph_state *pgraph_ctx;
-
-	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
-
-	pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
-	if (pgraph_ctx == NULL)
-		return -ENOMEM;
-	chan->engctx[engine] = pgraph_ctx;
-
-	NV_WRITE_CTX(0x00400e88, 0x08000000);
-	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
-	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
-	NV_WRITE_CTX(0x00400e10, 0x00001000);
-	NV_WRITE_CTX(0x00400e14, 0x00001000);
-	NV_WRITE_CTX(0x00400e30, 0x00080008);
-	NV_WRITE_CTX(0x00400e34, 0x00080008);
-	if (dev_priv->chipset >= 0x17) {
-		/* is it really needed ??? */
-		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
-					nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
-		NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
-		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
-		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
-		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
-		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
-	}
-	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
-
-	nv10_graph_create_pipe(chan);
-	return 0;
-}
-
-static void
-nv10_graph_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct graph_state *pgraph_ctx = chan->engctx[engine];
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
-	/* Unload the context if it's the currently active one */
-	if (nv10_graph_channel(dev) == chan)
-		nv10_graph_unload_context(dev);
-
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* Free the context resources */
-	chan->engctx[engine] = NULL;
-	kfree(pgraph_ctx);
-}
-
-static void
-nv10_graph_set_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
-}
-
-static int
-nv10_graph_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 tmp;
-	int i;
-
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
-			~NV_PMC_ENABLE_PGRAPH);
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
-			 NV_PMC_ENABLE_PGRAPH);
-
-	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
-				      (1<<29) |
-				      (1<<31));
-	if (dev_priv->chipset >= 0x17) {
-		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
-		nv_wr32(dev, 0x400a10, 0x3ff3fb6);
-		nv_wr32(dev, 0x400838, 0x2f8684);
-		nv_wr32(dev, 0x40083c, 0x115f3f);
-		nv_wr32(dev, 0x004006b0, 0x40000020);
-	} else
-		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
-
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_tile_region(dev, i);
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
-
-	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= 31 << 24;
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
-
-	return 0;
-}
-
-static int
-nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
-		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-		return -EBUSY;
-	}
-	nv10_graph_unload_context(dev);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
-	return 0;
-}
-
-static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
-			   u32 class, u32 mthd, u32 data)
-{
-	struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	struct pipe_state *pipe = &ctx->pipe_state;
-	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
-	uint32_t xfmode0, xfmode1;
-	int i;
-
-	ctx->lma_window[(mthd - 0x1638) / 4] = data;
-
-	if (mthd != 0x1644)
-		return 0;
-
-	nouveau_wait_for_idle(dev);
-
-	PIPE_SAVE(dev, pipe_0x0040, 0x0040);
-	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
-
-	PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
-
-	nouveau_wait_for_idle(dev);
-
-	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
-
-	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
-	PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
-	PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
-
-	nouveau_wait_for_idle(dev);
-
-	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
-	for (i = 0; i < 4; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-	for (i = 0; i < 4; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
-	for (i = 0; i < 3; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
-	for (i = 0; i < 3; i++)
-		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
-
-	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
-
-	nouveau_wait_for_idle(dev);
-
-	PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
-
-	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
-
-	PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
-	PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
-	PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
-	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
-
-	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
-	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nouveau_wait_for_idle(dev);
-
-	return 0;
-}
-
-static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
-			   u32 class, u32 mthd, u32 data)
-{
-	struct drm_device *dev = chan->dev;
-
-	nouveau_wait_for_idle(dev);
-
-	nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
-		nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
-	nv_wr32(dev, 0x004006b0,
-		nv_rd32(dev, 0x004006b0) | 0x8 << 24);
-
-	return 0;
-}
-
-struct nouveau_bitfield nv10_graph_intr[] = {
-	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
-	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
-	{}
-};
-
-struct nouveau_bitfield nv10_graph_nstatus[] = {
-	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
-	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
-	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
-	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
-	{}
-};
-
-static void
-nv10_graph_isr(struct drm_device *dev)
-{
-	u32 stat;
-
-	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-		u32 chid = (addr & 0x01f00000) >> 20;
-		u32 subc = (addr & 0x00070000) >> 16;
-		u32 mthd = (addr & 0x00001ffc);
-		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
-		u32 show = stat;
-
-		if (stat & NV_PGRAPH_INTR_ERROR) {
-			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
-					show &= ~NV_PGRAPH_INTR_ERROR;
-			}
-		}
-
-		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			nv10_graph_context_switch(dev);
-		}
-
-		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
-		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
-		if (show && nouveau_ratelimit()) {
-			NV_INFO(dev, "PGRAPH -");
-			nouveau_bitfield_print(nv10_graph_intr, show);
-			printk(" nsource:");
-			nouveau_bitfield_print(nv04_graph_nsource, nsource);
-			printk(" nstatus:");
-			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
-				     "mthd 0x%04x data 0x%08x\n",
-				chid, subc, class, mthd, data);
-		}
-	}
-}
-
-static void
-nv10_graph_destroy(struct drm_device *dev, int engine)
-{
-	struct nv10_graph_engine *pgraph = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 12);
-	kfree(pgraph);
-}
-
-int
-nv10_graph_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv10_graph_engine *pgraph;
-
-	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
-	if (!pgraph)
-		return -ENOMEM;
-
-	pgraph->base.destroy = nv10_graph_destroy;
-	pgraph->base.init = nv10_graph_init;
-	pgraph->base.fini = nv10_graph_fini;
-	pgraph->base.context_new = nv10_graph_context_new;
-	pgraph->base.context_del = nv10_graph_context_del;
-	pgraph->base.object_new = nv04_graph_object_new;
-	pgraph->base.set_tile_region = nv10_graph_set_tile_region;
-
-	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
-	nouveau_irq_register(dev, 12, nv10_graph_isr);
-
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
-	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
-	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
-	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
-
-	/* celsius */
-	if (dev_priv->chipset <= 0x10) {
-		NVOBJ_CLASS(dev, 0x0056, GR);
-	} else
-	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
-		NVOBJ_CLASS(dev, 0x0096, GR);
-	} else {
-		NVOBJ_CLASS(dev, 0x0099, GR);
-		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
-		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c
deleted file mode 100644
index 4ae61aeea741..000000000000
--- a/drivers/gpu/drm/nouveau/nv17_fifo.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-
-static struct ramfc_desc {
-	unsigned bits:6;
-	unsigned ctxs:5;
-	unsigned ctxp:8;
-	unsigned regs:5;
-	unsigned regp;
-} nv17_ramfc[] = {
-	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
-	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
-	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
-	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
-	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
-	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
-	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
-	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
-	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
-	{ 32,  0, 0x20,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
-	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
-	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
-	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
-	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
-	{}
-};
-
-struct nv17_fifo_priv {
-	struct nouveau_fifo_priv base;
-	struct ramfc_desc *ramfc_desc;
-};
-
-struct nv17_fifo_chan {
-	struct nouveau_fifo_chan base;
-	struct nouveau_gpuobj *ramfc;
-};
-
-static int
-nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv17_fifo_priv *priv = nv_engine(dev, engine);
-	struct nv17_fifo_chan *fctx;
-	unsigned long flags;
-	int ret;
-
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-
-	/* map channel control registers */
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			     NV03_USER(chan->id), PAGE_SIZE);
-	if (!chan->user) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	/* initialise default fifo context */
-	ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
-				      chan->id * 64, ~0, 64,
-				      NVOBJ_FLAG_ZERO_ALLOC |
-				      NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
-	if (ret)
-		goto error;
-
-	nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
-	nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
-	nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
-	nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-				   NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-				   NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-				   NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-
-	/* enable dma mode on the channel */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
-	if (ret)
-		priv->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static int
-nv17_fifo_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv17_fifo_priv *priv = nv_engine(dev, engine);
-	int i;
-
-	nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
-	nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-
-	nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
-	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
-	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((dev_priv->ramht->bits - 9) << 16) |
-				       (dev_priv->ramht->gpuobj->pinst >> 8));
-	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
-	nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
-				       dev_priv->ramfc->pinst >> 8);
-
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
-
-	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-
-	for (i = 0; i < priv->base.channels; i++) {
-		if (dev_priv->channels.ptr[i])
-			nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
-	}
-
-	return 0;
-}
-
-int
-nv17_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv17_fifo_priv *priv;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.base.destroy = nv04_fifo_destroy;
-	priv->base.base.init = nv17_fifo_init;
-	priv->base.base.fini = nv04_fifo_fini;
-	priv->base.base.context_new = nv17_fifo_context_new;
-	priv->base.base.context_del = nv04_fifo_context_del;
-	priv->base.channels = 31;
-	priv->ramfc_desc = nv17_ramfc;
-	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 6331e79b0124..897b63621e2d 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -26,18 +26,32 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_gpio.h"
 #include "nouveau_hw.h"
 #include "nv17_tv.h"
 
+#include <core/device.h>
+
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
+		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
+		 "\t\tDefault: PAL\n"
+		 "\t\t*NOTE* Ignored for cards with external TV encoders.");
+static char *nouveau_tv_norm;
+module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
+
 static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
 		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -46,15 +60,15 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 
 #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
 	testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
-	if (dev_priv->vbios.tvdactestval)
-		testval = dev_priv->vbios.tvdactestval;
+	if (drm->vbios.tvdactestval)
+		testval = drm->vbios.tvdactestval;
 
 	dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
 	head = (dacclk & 0x100) >> 8;
 
 	/* Save the previous state. */
-	gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
-	gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
+	gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+	gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
 	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
 	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
 	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +79,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
 
 	/* Prepare the DAC for load detection.  */
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
 
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +125,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
 
 	return sample;
 }
@@ -120,15 +134,18 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 static bool
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_object *device = drm->device;
+
 	/* Zotac FX5200 */
-	if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
-	    nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
+	if (nv_device_match(device, 0x0322, 0x19da, 0x1035) ||
+	    nv_device_match(device, 0x0322, 0x19da, 0x2035)) {
 		*pin_mask = 0xc;
 		return false;
 	}
 
 	/* MSI nForce2 IGP */
-	if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
+	if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) {
 		*pin_mask = 0xc;
 		return false;
 	}
@@ -140,18 +157,18 @@ static enum drm_connector_status
 nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_mode_config *conf = &dev->mode_config;
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
-	struct dcb_entry *dcb = tv_enc->base.dcb;
+	struct dcb_output *dcb = tv_enc->base.dcb;
 	bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask);
 
 	if (nv04_dac_in_use(encoder))
 		return connector_status_disconnected;
 
 	if (reliable) {
-		if (dev_priv->chipset == 0x42 ||
-		    dev_priv->chipset == 0x43)
+		if (nv_device(drm->device)->chipset == 0x42 ||
+		    nv_device(drm->device)->chipset == 0x43)
 			tv_enc->pin_mask =
 				nv42_tv_sample_load(encoder) >> 28 & 0xe;
 		else
@@ -185,7 +202,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 	if (!reliable) {
 		return connector_status_unknown;
 	} else if (tv_enc->subconnector) {
-		NV_INFO(dev, "Load detected on output %c\n",
+		NV_INFO(drm, "Load detected on output %c\n",
 			'@' + ffs(dcb->or));
 		return connector_status_connected;
 	} else {
@@ -357,6 +374,8 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
 static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
 	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 
@@ -364,7 +383,7 @@ static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 		return;
 	nouveau_encoder(encoder)->last_dpms = mode;
 
-	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+	NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
 		 mode, nouveau_encoder(encoder)->dcb->index);
 
 	regs->ptv_200 &= ~1;
@@ -381,8 +400,8 @@ static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 
 	nv_load_ptv(dev, regs, 200);
 
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
 
 	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
 }
@@ -390,11 +409,11 @@ static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 static void nv17_tv_prepare(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
 	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 	int head = nouveau_crtc(encoder->crtc)->index;
-	uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
+	uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[
 							NV_CIO_CRE_LCD__INDEX];
 	uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
 					nv04_dac_output_offset(encoder);
@@ -410,14 +429,14 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
 		struct drm_encoder *enc;
 
 		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-			struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;
+			struct dcb_output *dcb = nouveau_encoder(enc)->dcb;
 
-			if ((dcb->type == OUTPUT_TMDS ||
-			     dcb->type == OUTPUT_LVDS) &&
+			if ((dcb->type == DCB_OUTPUT_TMDS ||
+			     dcb->type == DCB_OUTPUT_LVDS) &&
 			     !enc->crtc &&
 			     nv04_dfp_get_bound_head(dev, dcb) == head) {
 				nv04_dfp_bind_head(dev, dcb, head ^ 1,
-						dev_priv->vbios.fp.dual_link);
+						drm->vbios.fp.dual_link);
 			}
 		}
 
@@ -429,7 +448,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
 	/* Set the DACCLK register */
 	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
 
-	if (dev_priv->card_type == NV_40)
+	if (nv_device(drm->device)->card_type == NV_40)
 		dacclk |= 0x1a << 16;
 
 	if (tv_norm->kind == CTV_ENC_MODE) {
@@ -453,9 +472,9 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
 			     struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int head = nouveau_crtc(encoder->crtc)->index;
-	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+	struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
 	struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
 	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 	int i;
@@ -486,7 +505,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
 			tv_regs->ptv_614 = 0x13;
 		}
 
-		if (dev_priv->card_type >= NV_30) {
+		if (nv_device(drm->device)->card_type >= NV_30) {
 			tv_regs->ptv_500 = 0xe8e0;
 			tv_regs->ptv_504 = 0x1710;
 			tv_regs->ptv_604 = 0x0;
@@ -566,7 +585,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
 static void nv17_tv_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
@@ -581,7 +600,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
 
 	/* This could use refinement for flatpanels, but it should work */
-	if (dev_priv->chipset < 0x44)
+	if (nv_device(drm->device)->chipset < 0x44)
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
 					nv04_dac_output_offset(encoder),
 					0xf0000000);
@@ -592,7 +611,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
 
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
-	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+	NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
 		drm_get_connector_name(
 			&nouveau_encoder_connector_get(nv_encoder)->base),
 		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
@@ -630,9 +649,10 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 				    struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_mode_config *conf = &dev->mode_config;
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
 							NUM_LD_TV_NORMS;
 	int i;
@@ -646,7 +666,7 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 		}
 
 		if (i == num_tv_norms)
-			NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
+			NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
 				nouveau_tv_norm);
 	}
 
@@ -759,8 +779,6 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
 {
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
 
-	NV_DEBUG_KMS(encoder->dev, "\n");
-
 	drm_encoder_cleanup(encoder);
 	kfree(tv_enc);
 }
@@ -788,7 +806,7 @@ static struct drm_encoder_funcs nv17_tv_funcs = {
 };
 
 int
-nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
+nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index 622e72221682..7b331543a41b 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -130,12 +130,14 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
 static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
 				uint32_t val)
 {
-	nv_wr32(dev, reg, val);
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_wr32(device, reg, val);
 }
 
 static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
 {
-	return nv_rd32(dev, reg);
+	struct nouveau_device *device = nouveau_dev(dev);
+	return nv_rd32(device, reg);
 }
 
 static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index 96e428641672..1cdfe2a5875d 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -26,7 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
 #include "nouveau_hw.h"
@@ -543,10 +543,9 @@ void nv17_tv_update_rescaler(struct drm_encoder *encoder)
 void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
 	int head = nouveau_crtc(encoder->crtc)->index;
-	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+	struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
 	struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
 	struct drm_display_mode *output_mode =
 		&get_tv_norm(encoder)->ctv_enc_mode.mode;
diff --git a/drivers/gpu/drm/nouveau/nv20_fb.c b/drivers/gpu/drm/nouveau/nv20_fb.c
deleted file mode 100644
index 5fffc2150b8e..000000000000
--- a/drivers/gpu/drm/nouveau/nv20_fb.c
+++ /dev/null
@@ -1,147 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-static struct drm_mm_node *
-nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct drm_mm_node *mem;
-	int ret;
-
-	ret = drm_mm_pre_get(&pfb->tag_heap);
-	if (ret)
-		return NULL;
-
-	spin_lock(&dev_priv->tile.lock);
-	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
-	if (mem)
-		mem = drm_mm_get_block_atomic(mem, size, 0);
-	spin_unlock(&dev_priv->tile.lock);
-
-	return mem;
-}
-
-static void
-nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_mm_node *mem = *pmem;
-	if (mem) {
-		spin_lock(&dev_priv->tile.lock);
-		drm_mm_put_block(mem);
-		spin_unlock(&dev_priv->tile.lock);
-		*pmem = NULL;
-	}
-}
-
-void
-nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
-			 uint32_t size, uint32_t pitch, uint32_t flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
-
-	tile->addr  = 0x00000001 | addr;
-	tile->limit = max(1u, addr + size) - 1;
-	tile->pitch = pitch;
-
-	/* Allocate some of the on-die tag memory, used to store Z
-	 * compression meta-data (most likely just a bitmap determining
-	 * if a given tile is compressed or not).
-	 */
-	if (flags & NOUVEAU_GEM_TILE_ZETA) {
-		tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
-		if (tile->tag_mem) {
-			/* Enable Z compression */
-			tile->zcomp = tile->tag_mem->start;
-			if (dev_priv->chipset >= 0x25) {
-				if (bpp == 16)
-					tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
-				else
-					tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
-			} else {
-				tile->zcomp |= NV20_PFB_ZCOMP_EN;
-				if (bpp != 16)
-					tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
-			}
-		}
-
-		tile->addr |= 2;
-	}
-}
-
-void
-nv20_fb_free_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
-	nv20_fb_free_tag(dev, &tile->tag_mem);
-}
-
-void
-nv20_fb_set_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
-	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
-	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
-	nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
-}
-
-int
-nv20_fb_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 mem_size = nv_rd32(dev, 0x10020c);
-	u32 pbus1218 = nv_rd32(dev, 0x001218);
-
-	dev_priv->vram_size = mem_size & 0xff000000;
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
-	}
-
-	return 0;
-}
-
-int
-nv20_fb_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	int i;
-
-	if (dev_priv->chipset >= 0x25)
-		drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
-	else
-		drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);
-
-	/* Turn all the tiling regions off. */
-	pfb->num_tiles = NV10_PFB_TILE__SIZE;
-	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_tile_region(dev, i);
-
-	return 0;
-}
-
-void
-nv20_fb_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	int i;
-
-	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->free_tile_region(dev, i);
-
-	drm_mm_takedown(&pfb->tag_heap);
-}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
deleted file mode 100644
index ffaab0ba76b9..000000000000
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ /dev/null
@@ -1,835 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-/*
- * NV20
- * -----
- * There are 3 families :
- * NV20 is 0x10de:0x020*
- * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
- * NV2A is 0x10de:0x02A0
- *
- * NV30
- * -----
- * There are 3 families :
- * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
- * NV34 is 0x10de:0x032*
- * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
- *
- * Not seen in the wild, no dumps (probably NV35) :
- * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
- * NV38 is 0x10de:0x0333, 0x10de:0x00fe
- *
- */
-
-struct nv20_graph_engine {
-	struct nouveau_exec_engine base;
-	struct nouveau_gpuobj *ctxtab;
-	void (*grctx_init)(struct nouveau_gpuobj *);
-	u32 grctx_size;
-	u32 grctx_user;
-};
-
-#define NV20_GRCTX_SIZE (3580*4)
-#define NV25_GRCTX_SIZE (3529*4)
-#define NV2A_GRCTX_SIZE (3500*4)
-
-#define NV30_31_GRCTX_SIZE (24392)
-#define NV34_GRCTX_SIZE    (18140)
-#define NV35_36_GRCTX_SIZE (22396)
-
-int
-nv20_graph_unload_context(struct drm_device *dev)
-{
-	struct nouveau_channel *chan;
-	struct nouveau_gpuobj *grctx;
-	u32 tmp;
-
-	chan = nv10_graph_channel(dev);
-	if (!chan)
-		return 0;
-	grctx = chan->engctx[NVOBJ_ENGINE_GR];
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
-		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
-
-	nouveau_wait_for_idle(dev);
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
-	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= 31 << 24;
-	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
-	return 0;
-}
-
-static void
-nv20_graph_rdi(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, writecount = 32;
-	uint32_t rdi_index = 0x2c80000;
-
-	if (dev_priv->chipset == 0x20) {
-		rdi_index = 0x3d0000;
-		writecount = 15;
-	}
-
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
-	for (i = 0; i < writecount; i++)
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
-
-	nouveau_wait_for_idle(dev);
-}
-
-static void
-nv20_graph_context_init(struct nouveau_gpuobj *ctx)
-{
-	int i;
-
-	nv_wo32(ctx, 0x033c, 0xffff0000);
-	nv_wo32(ctx, 0x03a0, 0x0fff0000);
-	nv_wo32(ctx, 0x03a4, 0x0fff0000);
-	nv_wo32(ctx, 0x047c, 0x00000101);
-	nv_wo32(ctx, 0x0490, 0x00000111);
-	nv_wo32(ctx, 0x04a8, 0x44400000);
-	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(ctx, i, 0x00030303);
-	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(ctx, i, 0x00080000);
-	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(ctx, i, 0x01012000);
-	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(ctx, i, 0x000105b8);
-	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(ctx, i, 0x00080008);
-	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(ctx, i, 0x07ff0000);
-	nv_wo32(ctx, 0x05a4, 0x4b7fffff);
-	nv_wo32(ctx, 0x05fc, 0x00000001);
-	nv_wo32(ctx, 0x0604, 0x00004000);
-	nv_wo32(ctx, 0x0610, 0x00000001);
-	nv_wo32(ctx, 0x0618, 0x00040000);
-	nv_wo32(ctx, 0x061c, 0x00010000);
-	for (i = 0x1c1c; i <= 0x248c; i += 16) {
-		nv_wo32(ctx, (i + 0), 0x10700ff9);
-		nv_wo32(ctx, (i + 4), 0x0436086c);
-		nv_wo32(ctx, (i + 8), 0x000c001b);
-	}
-	nv_wo32(ctx, 0x281c, 0x3f800000);
-	nv_wo32(ctx, 0x2830, 0x3f800000);
-	nv_wo32(ctx, 0x285c, 0x40000000);
-	nv_wo32(ctx, 0x2860, 0x3f800000);
-	nv_wo32(ctx, 0x2864, 0x3f000000);
-	nv_wo32(ctx, 0x286c, 0x40000000);
-	nv_wo32(ctx, 0x2870, 0x3f800000);
-	nv_wo32(ctx, 0x2878, 0xbf800000);
-	nv_wo32(ctx, 0x2880, 0xbf800000);
-	nv_wo32(ctx, 0x34a4, 0x000fe000);
-	nv_wo32(ctx, 0x3530, 0x000003f8);
-	nv_wo32(ctx, 0x3540, 0x002fe000);
-	for (i = 0x355c; i <= 0x3578; i += 4)
-		nv_wo32(ctx, i, 0x001c527c);
-}
-
-static void
-nv25_graph_context_init(struct nouveau_gpuobj *ctx)
-{
-	int i;
-
-	nv_wo32(ctx, 0x035c, 0xffff0000);
-	nv_wo32(ctx, 0x03c0, 0x0fff0000);
-	nv_wo32(ctx, 0x03c4, 0x0fff0000);
-	nv_wo32(ctx, 0x049c, 0x00000101);
-	nv_wo32(ctx, 0x04b0, 0x00000111);
-	nv_wo32(ctx, 0x04c8, 0x00000080);
-	nv_wo32(ctx, 0x04cc, 0xffff0000);
-	nv_wo32(ctx, 0x04d0, 0x00000001);
-	nv_wo32(ctx, 0x04e4, 0x44400000);
-	nv_wo32(ctx, 0x04fc, 0x4b800000);
-	for (i = 0x0510; i <= 0x051c; i += 4)
-		nv_wo32(ctx, i, 0x00030303);
-	for (i = 0x0530; i <= 0x053c; i += 4)
-		nv_wo32(ctx, i, 0x00080000);
-	for (i = 0x0548; i <= 0x0554; i += 4)
-		nv_wo32(ctx, i, 0x01012000);
-	for (i = 0x0558; i <= 0x0564; i += 4)
-		nv_wo32(ctx, i, 0x000105b8);
-	for (i = 0x0568; i <= 0x0574; i += 4)
-		nv_wo32(ctx, i, 0x00080008);
-	for (i = 0x0598; i <= 0x05d4; i += 4)
-		nv_wo32(ctx, i, 0x07ff0000);
-	nv_wo32(ctx, 0x05e0, 0x4b7fffff);
-	nv_wo32(ctx, 0x0620, 0x00000080);
-	nv_wo32(ctx, 0x0624, 0x30201000);
-	nv_wo32(ctx, 0x0628, 0x70605040);
-	nv_wo32(ctx, 0x062c, 0xb0a09080);
-	nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
-	nv_wo32(ctx, 0x0664, 0x00000001);
-	nv_wo32(ctx, 0x066c, 0x00004000);
-	nv_wo32(ctx, 0x0678, 0x00000001);
-	nv_wo32(ctx, 0x0680, 0x00040000);
-	nv_wo32(ctx, 0x0684, 0x00010000);
-	for (i = 0x1b04; i <= 0x2374; i += 16) {
-		nv_wo32(ctx, (i + 0), 0x10700ff9);
-		nv_wo32(ctx, (i + 4), 0x0436086c);
-		nv_wo32(ctx, (i + 8), 0x000c001b);
-	}
-	nv_wo32(ctx, 0x2704, 0x3f800000);
-	nv_wo32(ctx, 0x2718, 0x3f800000);
-	nv_wo32(ctx, 0x2744, 0x40000000);
-	nv_wo32(ctx, 0x2748, 0x3f800000);
-	nv_wo32(ctx, 0x274c, 0x3f000000);
-	nv_wo32(ctx, 0x2754, 0x40000000);
-	nv_wo32(ctx, 0x2758, 0x3f800000);
-	nv_wo32(ctx, 0x2760, 0xbf800000);
-	nv_wo32(ctx, 0x2768, 0xbf800000);
-	nv_wo32(ctx, 0x308c, 0x000fe000);
-	nv_wo32(ctx, 0x3108, 0x000003f8);
-	nv_wo32(ctx, 0x3468, 0x002fe000);
-	for (i = 0x3484; i <= 0x34a0; i += 4)
-		nv_wo32(ctx, i, 0x001c527c);
-}
-
-static void
-nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
-{
-	int i;
-
-	nv_wo32(ctx, 0x033c, 0xffff0000);
-	nv_wo32(ctx, 0x03a0, 0x0fff0000);
-	nv_wo32(ctx, 0x03a4, 0x0fff0000);
-	nv_wo32(ctx, 0x047c, 0x00000101);
-	nv_wo32(ctx, 0x0490, 0x00000111);
-	nv_wo32(ctx, 0x04a8, 0x44400000);
-	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(ctx, i, 0x00030303);
-	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(ctx, i, 0x00080000);
-	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(ctx, i, 0x01012000);
-	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(ctx, i, 0x000105b8);
-	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(ctx, i, 0x00080008);
-	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(ctx, i, 0x07ff0000);
-	nv_wo32(ctx, 0x05a4, 0x4b7fffff);
-	nv_wo32(ctx, 0x05fc, 0x00000001);
-	nv_wo32(ctx, 0x0604, 0x00004000);
-	nv_wo32(ctx, 0x0610, 0x00000001);
-	nv_wo32(ctx, 0x0618, 0x00040000);
-	nv_wo32(ctx, 0x061c, 0x00010000);
-	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
-		nv_wo32(ctx, (i + 0), 0x10700ff9);
-		nv_wo32(ctx, (i + 4), 0x0436086c);
-		nv_wo32(ctx, (i + 8), 0x000c001b);
-	}
-	nv_wo32(ctx, 0x269c, 0x3f800000);
-	nv_wo32(ctx, 0x26b0, 0x3f800000);
-	nv_wo32(ctx, 0x26dc, 0x40000000);
-	nv_wo32(ctx, 0x26e0, 0x3f800000);
-	nv_wo32(ctx, 0x26e4, 0x3f000000);
-	nv_wo32(ctx, 0x26ec, 0x40000000);
-	nv_wo32(ctx, 0x26f0, 0x3f800000);
-	nv_wo32(ctx, 0x26f8, 0xbf800000);
-	nv_wo32(ctx, 0x2700, 0xbf800000);
-	nv_wo32(ctx, 0x3024, 0x000fe000);
-	nv_wo32(ctx, 0x30a0, 0x000003f8);
-	nv_wo32(ctx, 0x33fc, 0x002fe000);
-	for (i = 0x341c; i <= 0x3438; i += 4)
-		nv_wo32(ctx, i, 0x001c527c);
-}
-
-static void
-nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
-{
-	int i;
-
-	nv_wo32(ctx, 0x0410, 0x00000101);
-	nv_wo32(ctx, 0x0424, 0x00000111);
-	nv_wo32(ctx, 0x0428, 0x00000060);
-	nv_wo32(ctx, 0x0444, 0x00000080);
-	nv_wo32(ctx, 0x0448, 0xffff0000);
-	nv_wo32(ctx, 0x044c, 0x00000001);
-	nv_wo32(ctx, 0x0460, 0x44400000);
-	nv_wo32(ctx, 0x048c, 0xffff0000);
-	for (i = 0x04e0; i < 0x04e8; i += 4)
-		nv_wo32(ctx, i, 0x0fff0000);
-	nv_wo32(ctx, 0x04ec, 0x00011100);
-	for (i = 0x0508; i < 0x0548; i += 4)
-		nv_wo32(ctx, i, 0x07ff0000);
-	nv_wo32(ctx, 0x0550, 0x4b7fffff);
-	nv_wo32(ctx, 0x058c, 0x00000080);
-	nv_wo32(ctx, 0x0590, 0x30201000);
-	nv_wo32(ctx, 0x0594, 0x70605040);
-	nv_wo32(ctx, 0x0598, 0xb8a89888);
-	nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
-	nv_wo32(ctx, 0x05b0, 0xb0000000);
-	for (i = 0x0600; i < 0x0640; i += 4)
-		nv_wo32(ctx, i, 0x00010588);
-	for (i = 0x0640; i < 0x0680; i += 4)
-		nv_wo32(ctx, i, 0x00030303);
-	for (i = 0x06c0; i < 0x0700; i += 4)
-		nv_wo32(ctx, i, 0x0008aae4);
-	for (i = 0x0700; i < 0x0740; i += 4)
-		nv_wo32(ctx, i, 0x01012000);
-	for (i = 0x0740; i < 0x0780; i += 4)
-		nv_wo32(ctx, i, 0x00080008);
-	nv_wo32(ctx, 0x085c, 0x00040000);
-	nv_wo32(ctx, 0x0860, 0x00010000);
-	for (i = 0x0864; i < 0x0874; i += 4)
-		nv_wo32(ctx, i, 0x00040004);
-	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
-		nv_wo32(ctx, i + 0, 0x10700ff9);
-		nv_wo32(ctx, i + 1, 0x0436086c);
-		nv_wo32(ctx, i + 2, 0x000c001b);
-	}
-	for (i = 0x30b8; i < 0x30c8; i += 4)
-		nv_wo32(ctx, i, 0x0000ffff);
-	nv_wo32(ctx, 0x344c, 0x3f800000);
-	nv_wo32(ctx, 0x3808, 0x3f800000);
-	nv_wo32(ctx, 0x381c, 0x3f800000);
-	nv_wo32(ctx, 0x3848, 0x40000000);
-	nv_wo32(ctx, 0x384c, 0x3f800000);
-	nv_wo32(ctx, 0x3850, 0x3f000000);
-	nv_wo32(ctx, 0x3858, 0x40000000);
-	nv_wo32(ctx, 0x385c, 0x3f800000);
-	nv_wo32(ctx, 0x3864, 0xbf800000);
-	nv_wo32(ctx, 0x386c, 0xbf800000);
-}
-
-static void
-nv34_graph_context_init(struct nouveau_gpuobj *ctx)
-{
-	int i;
-
-	nv_wo32(ctx, 0x040c, 0x01000101);
-	nv_wo32(ctx, 0x0420, 0x00000111);
-	nv_wo32(ctx, 0x0424, 0x00000060);
-	nv_wo32(ctx, 0x0440, 0x00000080);
-	nv_wo32(ctx, 0x0444, 0xffff0000);
-	nv_wo32(ctx, 0x0448, 0x00000001);
-	nv_wo32(ctx, 0x045c, 0x44400000);
-	nv_wo32(ctx, 0x0480, 0xffff0000);
-	for (i = 0x04d4; i < 0x04dc; i += 4)
-		nv_wo32(ctx, i, 0x0fff0000);
-	nv_wo32(ctx, 0x04e0, 0x00011100);
-	for (i = 0x04fc; i < 0x053c; i += 4)
-		nv_wo32(ctx, i, 0x07ff0000);
-	nv_wo32(ctx, 0x0544, 0x4b7fffff);
-	nv_wo32(ctx, 0x057c, 0x00000080);
-	nv_wo32(ctx, 0x0580, 0x30201000);
-	nv_wo32(ctx, 0x0584, 0x70605040);
-	nv_wo32(ctx, 0x0588, 0xb8a89888);
-	nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
-	nv_wo32(ctx, 0x05a0, 0xb0000000);
-	for (i = 0x05f0; i < 0x0630; i += 4)
-		nv_wo32(ctx, i, 0x00010588);
-	for (i = 0x0630; i < 0x0670; i += 4)
-		nv_wo32(ctx, i, 0x00030303);
-	for (i = 0x06b0; i < 0x06f0; i += 4)
-		nv_wo32(ctx, i, 0x0008aae4);
-	for (i = 0x06f0; i < 0x0730; i += 4)
-		nv_wo32(ctx, i, 0x01012000);
-	for (i = 0x0730; i < 0x0770; i += 4)
-		nv_wo32(ctx, i, 0x00080008);
-	nv_wo32(ctx, 0x0850, 0x00040000);
-	nv_wo32(ctx, 0x0854, 0x00010000);
-	for (i = 0x0858; i < 0x0868; i += 4)
-		nv_wo32(ctx, i, 0x00040004);
-	for (i = 0x15ac; i <= 0x271c ; i += 16) {
-		nv_wo32(ctx, i + 0, 0x10700ff9);
-		nv_wo32(ctx, i + 1, 0x0436086c);
-		nv_wo32(ctx, i + 2, 0x000c001b);
-	}
-	for (i = 0x274c; i < 0x275c; i += 4)
-		nv_wo32(ctx, i, 0x0000ffff);
-	nv_wo32(ctx, 0x2ae0, 0x3f800000);
-	nv_wo32(ctx, 0x2e9c, 0x3f800000);
-	nv_wo32(ctx, 0x2eb0, 0x3f800000);
-	nv_wo32(ctx, 0x2edc, 0x40000000);
-	nv_wo32(ctx, 0x2ee0, 0x3f800000);
-	nv_wo32(ctx, 0x2ee4, 0x3f000000);
-	nv_wo32(ctx, 0x2eec, 0x40000000);
-	nv_wo32(ctx, 0x2ef0, 0x3f800000);
-	nv_wo32(ctx, 0x2ef8, 0xbf800000);
-	nv_wo32(ctx, 0x2f00, 0xbf800000);
-}
-
-static void
-nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
-{
-	int i;
-
-	nv_wo32(ctx, 0x040c, 0x00000101);
-	nv_wo32(ctx, 0x0420, 0x00000111);
-	nv_wo32(ctx, 0x0424, 0x00000060);
-	nv_wo32(ctx, 0x0440, 0x00000080);
-	nv_wo32(ctx, 0x0444, 0xffff0000);
-	nv_wo32(ctx, 0x0448, 0x00000001);
-	nv_wo32(ctx, 0x045c, 0x44400000);
-	nv_wo32(ctx, 0x0488, 0xffff0000);
-	for (i = 0x04dc; i < 0x04e4; i += 4)
-		nv_wo32(ctx, i, 0x0fff0000);
-	nv_wo32(ctx, 0x04e8, 0x00011100);
-	for (i = 0x0504; i < 0x0544; i += 4)
-		nv_wo32(ctx, i, 0x07ff0000);
-	nv_wo32(ctx, 0x054c, 0x4b7fffff);
-	nv_wo32(ctx, 0x0588, 0x00000080);
-	nv_wo32(ctx, 0x058c, 0x30201000);
-	nv_wo32(ctx, 0x0590, 0x70605040);
-	nv_wo32(ctx, 0x0594, 0xb8a89888);
-	nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
-	nv_wo32(ctx, 0x05ac, 0xb0000000);
-	for (i = 0x0604; i < 0x0644; i += 4)
-		nv_wo32(ctx, i, 0x00010588);
-	for (i = 0x0644; i < 0x0684; i += 4)
-		nv_wo32(ctx, i, 0x00030303);
-	for (i = 0x06c4; i < 0x0704; i += 4)
-		nv_wo32(ctx, i, 0x0008aae4);
-	for (i = 0x0704; i < 0x0744; i += 4)
-		nv_wo32(ctx, i, 0x01012000);
-	for (i = 0x0744; i < 0x0784; i += 4)
-		nv_wo32(ctx, i, 0x00080008);
-	nv_wo32(ctx, 0x0860, 0x00040000);
-	nv_wo32(ctx, 0x0864, 0x00010000);
-	for (i = 0x0868; i < 0x0878; i += 4)
-		nv_wo32(ctx, i, 0x00040004);
-	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
-		nv_wo32(ctx, i + 0, 0x10700ff9);
-		nv_wo32(ctx, i + 4, 0x0436086c);
-		nv_wo32(ctx, i + 8, 0x000c001b);
-	}
-	for (i = 0x30bc; i < 0x30cc; i += 4)
-		nv_wo32(ctx, i, 0x0000ffff);
-	nv_wo32(ctx, 0x3450, 0x3f800000);
-	nv_wo32(ctx, 0x380c, 0x3f800000);
-	nv_wo32(ctx, 0x3820, 0x3f800000);
-	nv_wo32(ctx, 0x384c, 0x40000000);
-	nv_wo32(ctx, 0x3850, 0x3f800000);
-	nv_wo32(ctx, 0x3854, 0x3f000000);
-	nv_wo32(ctx, 0x385c, 0x40000000);
-	nv_wo32(ctx, 0x3860, 0x3f800000);
-	nv_wo32(ctx, 0x3868, 0xbf800000);
-	nv_wo32(ctx, 0x3870, 0xbf800000);
-}
-
-int
-nv20_graph_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
-	struct nouveau_gpuobj *grctx = NULL;
-	struct drm_device *dev = chan->dev;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
-	if (ret)
-		return ret;
-
-	/* Initialise default context values */
-	pgraph->grctx_init(grctx);
-
-	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
-	/* CTX_USER */
-	nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
-
-	nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
-	chan->engctx[engine] = grctx;
-	return 0;
-}
-
-void
-nv20_graph_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
-	struct nouveau_gpuobj *grctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
-	/* Unload the context if it's the currently active one */
-	if (nv10_graph_channel(dev) == chan)
-		nv20_graph_unload_context(dev);
-
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* Free the context resources */
-	nv_wo32(pgraph->ctxtab, chan->id * 4, 0);
-
-	nouveau_gpuobj_ref(NULL, &grctx);
-	chan->engctx[engine] = NULL;
-}
-
-static void
-nv20_graph_set_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
-
-	if (dev_priv->card_type == NV_20) {
-		nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
-	}
-}
-
-int
-nv20_graph_init(struct drm_device *dev, int engine)
-{
-	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t tmp, vramsz;
-	int i;
-
-	nv_wr32(dev, NV03_PMC_ENABLE,
-		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
-	nv_wr32(dev, NV03_PMC_ENABLE,
-		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
-
-	nv20_graph_rdi(dev);
-
-	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
-	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
-	nv_wr32(dev, 0x40009C           , 0x00000040);
-
-	if (dev_priv->chipset >= 0x25) {
-		nv_wr32(dev, 0x400890, 0x00a8cfff);
-		nv_wr32(dev, 0x400610, 0x304B1FB6);
-		nv_wr32(dev, 0x400B80, 0x1cbd3883);
-		nv_wr32(dev, 0x400B84, 0x44000000);
-		nv_wr32(dev, 0x400098, 0x40000080);
-		nv_wr32(dev, 0x400B88, 0x000000ff);
-
-	} else {
-		nv_wr32(dev, 0x400880, 0x0008c7df);
-		nv_wr32(dev, 0x400094, 0x00000005);
-		nv_wr32(dev, 0x400B80, 0x45eae20e);
-		nv_wr32(dev, 0x400B84, 0x24000000);
-		nv_wr32(dev, 0x400098, 0x00000040);
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
-	}
-
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_tile_region(dev, i);
-
-	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-
-	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
-	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
-	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
-	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
-
-	/* begin RAM config */
-	vramsz = pci_resource_len(dev->pdev, 0) - 1;
-	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
-	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
-	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
-	nv_wr32(dev, 0x400820, 0);
-	nv_wr32(dev, 0x400824, 0);
-	nv_wr32(dev, 0x400864, vramsz - 1);
-	nv_wr32(dev, 0x400868, vramsz - 1);
-
-	/* interesting.. the below overwrites some of the tile setup above.. */
-	nv_wr32(dev, 0x400B20, 0x00000000);
-	nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
-
-	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
-	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
-	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
-	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
-
-	return 0;
-}
-
-int
-nv30_graph_init(struct drm_device *dev, int engine)
-{
-	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i;
-
-	nv_wr32(dev, NV03_PMC_ENABLE,
-		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
-	nv_wr32(dev, NV03_PMC_ENABLE,
-		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
-
-	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
-	nv_wr32(dev, 0x400890, 0x01b463ff);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
-	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
-	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
-	nv_wr32(dev, 0x400B80, 0x1003d888);
-	nv_wr32(dev, 0x400B84, 0x0c000000);
-	nv_wr32(dev, 0x400098, 0x00000000);
-	nv_wr32(dev, 0x40009C, 0x0005ad00);
-	nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
-	nv_wr32(dev, 0x4000a0, 0x00000000);
-	nv_wr32(dev, 0x4000a4, 0x00000008);
-	nv_wr32(dev, 0x4008a8, 0xb784a400);
-	nv_wr32(dev, 0x400ba0, 0x002f8685);
-	nv_wr32(dev, 0x400ba4, 0x00231f3f);
-	nv_wr32(dev, 0x4008a4, 0x40000020);
-
-	if (dev_priv->chipset == 0x34) {
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
-	}
-
-	nv_wr32(dev, 0x4000c0, 0x00000016);
-
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_tile_region(dev, i);
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-	nv_wr32(dev, 0x0040075c             , 0x00000001);
-
-	/* begin RAM config */
-	/* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
-	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
-	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
-	if (dev_priv->chipset != 0x34) {
-		nv_wr32(dev, 0x400750, 0x00EA0000);
-		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
-		nv_wr32(dev, 0x400750, 0x00EA0004);
-		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
-	}
-
-	return 0;
-}
-
-int
-nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
-		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-		return -EBUSY;
-	}
-	nv20_graph_unload_context(dev);
-	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
-	return 0;
-}
-
-static void
-nv20_graph_isr(struct drm_device *dev)
-{
-	u32 stat;
-
-	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-		u32 chid = (addr & 0x01f00000) >> 20;
-		u32 subc = (addr & 0x00070000) >> 16;
-		u32 mthd = (addr & 0x00001ffc);
-		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
-		u32 show = stat;
-
-		if (stat & NV_PGRAPH_INTR_ERROR) {
-			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
-					show &= ~NV_PGRAPH_INTR_ERROR;
-			}
-		}
-
-		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
-		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
-		if (show && nouveau_ratelimit()) {
-			NV_INFO(dev, "PGRAPH -");
-			nouveau_bitfield_print(nv10_graph_intr, show);
-			printk(" nsource:");
-			nouveau_bitfield_print(nv04_graph_nsource, nsource);
-			printk(" nstatus:");
-			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
-				     "mthd 0x%04x data 0x%08x\n",
-				chid, subc, class, mthd, data);
-		}
-	}
-}
-
-static void
-nv20_graph_destroy(struct drm_device *dev, int engine)
-{
-	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 12);
-	nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);
-
-	NVOBJ_ENGINE_DEL(dev, GR);
-	kfree(pgraph);
-}
-
-int
-nv20_graph_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv20_graph_engine *pgraph;
-	int ret;
-
-	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
-	if (!pgraph)
-		return -ENOMEM;
-
-	pgraph->base.destroy = nv20_graph_destroy;
-	pgraph->base.fini = nv20_graph_fini;
-	pgraph->base.context_new = nv20_graph_context_new;
-	pgraph->base.context_del = nv20_graph_context_del;
-	pgraph->base.object_new = nv04_graph_object_new;
-	pgraph->base.set_tile_region = nv20_graph_set_tile_region;
-
-	pgraph->grctx_user = 0x0028;
-	if (dev_priv->card_type == NV_20) {
-		pgraph->base.init = nv20_graph_init;
-		switch (dev_priv->chipset) {
-		case 0x20:
-			pgraph->grctx_init = nv20_graph_context_init;
-			pgraph->grctx_size = NV20_GRCTX_SIZE;
-			pgraph->grctx_user = 0x0000;
-			break;
-		case 0x25:
-		case 0x28:
-			pgraph->grctx_init = nv25_graph_context_init;
-			pgraph->grctx_size = NV25_GRCTX_SIZE;
-			break;
-		case 0x2a:
-			pgraph->grctx_init = nv2a_graph_context_init;
-			pgraph->grctx_size = NV2A_GRCTX_SIZE;
-			pgraph->grctx_user = 0x0000;
-			break;
-		default:
-			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
-			kfree(pgraph);
-			return 0;
-		}
-	} else {
-		pgraph->base.init = nv30_graph_init;
-		switch (dev_priv->chipset) {
-		case 0x30:
-		case 0x31:
-			pgraph->grctx_init = nv30_31_graph_context_init;
-			pgraph->grctx_size = NV30_31_GRCTX_SIZE;
-			break;
-		case 0x34:
-			pgraph->grctx_init = nv34_graph_context_init;
-			pgraph->grctx_size = NV34_GRCTX_SIZE;
-			break;
-		case 0x35:
-		case 0x36:
-			pgraph->grctx_init = nv35_36_graph_context_init;
-			pgraph->grctx_size = NV35_36_GRCTX_SIZE;
-			break;
-		default:
-			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
-			kfree(pgraph);
-			return 0;
-		}
-	}
-
-	/* Create Context Pointer Table */
-	ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
-				 &pgraph->ctxtab);
-	if (ret) {
-		kfree(pgraph);
-		return ret;
-	}
-
-	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
-	nouveau_irq_register(dev, 12, nv20_graph_isr);
-
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	if (dev_priv->card_type == NV_20) {
-		NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
-		NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
-
-		/* kelvin */
-		if (dev_priv->chipset < 0x25)
-			NVOBJ_CLASS(dev, 0x0097, GR);
-		else
-			NVOBJ_CLASS(dev, 0x0597, GR);
-	} else {
-		NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
-		NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
-		NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
-		NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
-
-		/* rankine */
-		if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
-			NVOBJ_CLASS(dev, 0x0397, GR);
-		else
-		if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
-			NVOBJ_CLASS(dev, 0x0697, GR);
-		else
-		if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
-			NVOBJ_CLASS(dev, 0x0497, GR);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
deleted file mode 100644
index 9cc4de8de5ca..000000000000
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2010 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-void
-nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
-			 uint32_t size, uint32_t pitch, uint32_t flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	tile->addr = addr | 1;
-	tile->limit = max(1u, addr + size) - 1;
-	tile->pitch = pitch;
-}
-
-void
-nv30_fb_free_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	tile->addr = tile->limit = tile->pitch = 0;
-}
-
-static int
-calc_bias(struct drm_device *dev, int k, int i, int j)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int b = (dev_priv->chipset > 0x30 ?
-		 nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
-		 0) & 0xf;
-
-	return 2 * (b & 0x8 ? b - 0x10 : b);
-}
-
-static int
-calc_ref(struct drm_device *dev, int l, int k, int i)
-{
-	int j, x = 0;
-
-	for (j = 0; j < 4; j++) {
-		int m = (l >> (8 * i) & 0xff) + calc_bias(dev, k, i, j);
-
-		x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
-	}
-
-	return x;
-}
-
-int
-nv30_fb_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	int i, j;
-
-	pfb->num_tiles = NV10_PFB_TILE__SIZE;
-
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_tile_region(dev, i);
-
-	/* Init the memory timing regs at 0x10037c/0x1003ac */
-	if (dev_priv->chipset == 0x30 ||
-	    dev_priv->chipset == 0x31 ||
-	    dev_priv->chipset == 0x35) {
-		/* Related to ROP count */
-		int n = (dev_priv->chipset == 0x31 ? 2 : 4);
-		int l = nv_rd32(dev, 0x1003d0);
-
-		for (i = 0; i < n; i++) {
-			for (j = 0; j < 3; j++)
-				nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
-					calc_ref(dev, l, 0, j));
-
-			for (j = 0; j < 2; j++)
-				nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
-					calc_ref(dev, l, 1, j));
-		}
-	}
-
-	return 0;
-}
-
-void
-nv30_fb_takedown(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
deleted file mode 100644
index 818deb67588e..000000000000
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-
-struct nv31_mpeg_engine {
-	struct nouveau_exec_engine base;
-	atomic_t refcount;
-};
-
-
-static int
-nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
-
-	if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
-		return -EBUSY;
-
-	chan->engctx[engine] = (void *)0xdeadcafe;
-	return 0;
-}
-
-static void
-nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
-	atomic_dec(&pmpeg->refcount);
-	chan->engctx[engine] = NULL;
-}
-
-static int
-nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ctx = NULL;
-	unsigned long flags;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &ctx);
-	if (ret)
-		return ret;
-
-	nv_wo32(ctx, 0x78, 0x02001ec1);
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
-	if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
-		nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
-	nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	chan->engctx[engine] = ctx;
-	return 0;
-}
-
-static void
-nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	unsigned long flags;
-	u32 inst = 0x80000000 | (ctx->pinst >> 4);
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
-	if (nv_rd32(dev, 0x00b318) == inst)
-		nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	nouveau_gpuobj_ref(NULL, &ctx);
-	chan->engctx[engine] = NULL;
-}
-
-static int
-nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 2;
-	obj->class  = class;
-
-	nv_wo32(obj, 0x00, class);
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static int
-nv31_mpeg_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
-	int i;
-
-	/* VPE init */
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
-	nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-	nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-
-	for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
-		pmpeg->base.set_tile_region(dev, i);
-
-	/* PMPEG init */
-	nv_wr32(dev, 0x00b32c, 0x00000000);
-	nv_wr32(dev, 0x00b314, 0x00000100);
-	nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
-	nv_wr32(dev, 0x00b300, 0x02001ec1);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-
-	nv_wr32(dev, 0x00b100, 0xffffffff);
-	nv_wr32(dev, 0x00b140, 0xffffffff);
-
-	if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
-		NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	/*XXX: context save? */
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x00b140, 0x00000000);
-	return 0;
-}
-
-static int
-nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct drm_device *dev = chan->dev;
-	u32 inst = data << 4;
-	u32 dma0 = nv_ri32(dev, inst + 0);
-	u32 dma1 = nv_ri32(dev, inst + 4);
-	u32 dma2 = nv_ri32(dev, inst + 8);
-	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
-	u32 size = dma1 + 1;
-
-	/* only allow linear DMA objects */
-	if (!(dma0 & 0x00002000))
-		return -EINVAL;
-
-	if (mthd == 0x0190) {
-		/* DMA_CMD */
-		nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
-		nv_wr32(dev, 0x00b334, base);
-		nv_wr32(dev, 0x00b324, size);
-	} else
-	if (mthd == 0x01a0) {
-		/* DMA_DATA */
-		nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
-		nv_wr32(dev, 0x00b360, base);
-		nv_wr32(dev, 0x00b364, size);
-	} else {
-		/* DMA_IMAGE, VRAM only */
-		if (dma0 & 0x000c0000)
-			return -EINVAL;
-
-		nv_wr32(dev, 0x00b370, base);
-		nv_wr32(dev, 0x00b374, size);
-	}
-
-	return 0;
-}
-
-static int
-nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ctx;
-	unsigned long flags;
-	int i;
-
-	/* hardcode drm channel id on nv3x, so swmthd lookup works */
-	if (dev_priv->card_type < NV_40)
-		return 0;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < pfifo->channels; i++) {
-		if (!dev_priv->channels.ptr[i])
-			continue;
-
-		ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
-		if (ctx && ctx->pinst == inst)
-			break;
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return i;
-}
-
-static void
-nv31_vpe_set_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
-	nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
-	nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
-}
-
-static void
-nv31_mpeg_isr(struct drm_device *dev)
-{
-	u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
-	u32 chid = nv31_mpeg_isr_chid(dev, inst);
-	u32 stat = nv_rd32(dev, 0x00b100);
-	u32 type = nv_rd32(dev, 0x00b230);
-	u32 mthd = nv_rd32(dev, 0x00b234);
-	u32 data = nv_rd32(dev, 0x00b238);
-	u32 show = stat;
-
-	if (stat & 0x01000000) {
-		/* happens on initial binding of the object */
-		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
-			show &= ~0x01000000;
-		}
-
-		if (type == 0x00000010) {
-			if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
-				show &= ~0x01000000;
-		}
-	}
-
-	nv_wr32(dev, 0x00b100, stat);
-	nv_wr32(dev, 0x00b230, 0x00000001);
-
-	if (show && nouveau_ratelimit()) {
-		NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			chid, inst, stat, type, mthd, data);
-	}
-}
-
-static void
-nv31_vpe_isr(struct drm_device *dev)
-{
-	if (nv_rd32(dev, 0x00b100))
-		nv31_mpeg_isr(dev);
-
-	if (nv_rd32(dev, 0x00b800)) {
-		u32 stat = nv_rd32(dev, 0x00b800);
-		NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
-		nv_wr32(dev, 0xb800, stat);
-	}
-}
-
-static void
-nv31_mpeg_destroy(struct drm_device *dev, int engine)
-{
-	struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 0);
-
-	NVOBJ_ENGINE_DEL(dev, MPEG);
-	kfree(pmpeg);
-}
-
-int
-nv31_mpeg_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv31_mpeg_engine *pmpeg;
-
-	pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
-	if (!pmpeg)
-		return -ENOMEM;
-	atomic_set(&pmpeg->refcount, 0);
-
-	pmpeg->base.destroy = nv31_mpeg_destroy;
-	pmpeg->base.init = nv31_mpeg_init;
-	pmpeg->base.fini = nv31_mpeg_fini;
-	if (dev_priv->card_type < NV_40) {
-		pmpeg->base.context_new = nv31_mpeg_context_new;
-		pmpeg->base.context_del = nv31_mpeg_context_del;
-	} else {
-		pmpeg->base.context_new = nv40_mpeg_context_new;
-		pmpeg->base.context_del = nv40_mpeg_context_del;
-	}
-	pmpeg->base.object_new = nv31_mpeg_object_new;
-
-	/* ISR vector, PMC_ENABLE bit,  and TILE regs are shared between
-	 * all VPE engines, for this driver's purposes the PMPEG engine
-	 * will be treated as the "master" and handle the global VPE
-	 * bits too
-	 */
-	pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
-	nouveau_irq_register(dev, 0, nv31_vpe_isr);
-
-	NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
-	NVOBJ_CLASS(dev, 0x3174, MPEG);
-	NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
-	NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
-	NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
-
-#if 0
-	NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
-	NVOBJ_CLASS(dev, 0x4075, ME);
-#endif
-	return 0;
-
-}
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
deleted file mode 100644
index 88b4f7c43992..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ /dev/null
@@ -1,162 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-void
-nv40_fb_set_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	switch (dev_priv->chipset) {
-	case 0x40:
-		nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
-		break;
-
-	default:
-		nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
-		break;
-	}
-}
-
-static void
-nv40_fb_init_gart(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
-
-	if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
-		nv_wr32(dev, 0x100800, 0x00000001);
-		return;
-	}
-
-	nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
-	nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
-	nv_wr32(dev, 0x100820, 0x00000000);
-}
-
-static void
-nv44_fb_init_gart(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
-	u32 vinst;
-
-	if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
-		nv_wr32(dev, 0x100850, 0x80000000);
-		nv_wr32(dev, 0x100800, 0x00000001);
-		return;
-	}
-
-	/* calculate vram address of this PRAMIN block, object
-	 * must be allocated on 512KiB alignment, and not exceed
-	 * a total size of 512KiB for this to work correctly
-	 */
-	vinst  = nv_rd32(dev, 0x10020c);
-	vinst -= ((gart->pinst >> 19) + 1) << 19;
-
-	nv_wr32(dev, 0x100850, 0x80000000);
-	nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
-
-	nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
-	nv_wr32(dev, 0x100850, 0x00008000);
-	nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
-	nv_wr32(dev, 0x100820, 0x00000000);
-	nv_wr32(dev, 0x10082c, 0x00000001);
-	nv_wr32(dev, 0x100800, vinst | 0x00000010);
-}
-
-int
-nv40_fb_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/* 0x001218 is actually present on a few other NV4X I looked at,
-	 * and even contains sane values matching 0x100474.  From looking
-	 * at various vbios images however, this isn't the case everywhere.
-	 * So, I chose to use the same regs I've seen NVIDIA reading around
-	 * the memory detection, hopefully that'll get us the right numbers
-	 */
-	if (dev_priv->chipset == 0x40) {
-		u32 pbus1218 = nv_rd32(dev, 0x001218);
-		switch (pbus1218 & 0x00000300) {
-		case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
-		case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
-		case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
-		case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
-		}
-	} else
-	if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
-		u32 pfb914 = nv_rd32(dev, 0x100914);
-		switch (pfb914 & 0x00000003) {
-		case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
-		case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
-		case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
-		case 0x00000003: break;
-		}
-	} else
-	if (dev_priv->chipset != 0x4e) {
-		u32 pfb474 = nv_rd32(dev, 0x100474);
-		if (pfb474 & 0x00000004)
-			dev_priv->vram_type = NV_MEM_TYPE_GDDR3;
-		if (pfb474 & 0x00000002)
-			dev_priv->vram_type = NV_MEM_TYPE_DDR2;
-		if (pfb474 & 0x00000001)
-			dev_priv->vram_type = NV_MEM_TYPE_DDR1;
-	} else {
-		dev_priv->vram_type = NV_MEM_TYPE_STOLEN;
-	}
-
-	dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000;
-	return 0;
-}
-
-int
-nv40_fb_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	uint32_t tmp;
-	int i;
-
-	if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
-		if (nv44_graph_class(dev))
-			nv44_fb_init_gart(dev);
-		else
-			nv40_fb_init_gart(dev);
-	}
-
-	switch (dev_priv->chipset) {
-	case 0x40:
-	case 0x45:
-		tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
-		nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
-		pfb->num_tiles = NV10_PFB_TILE__SIZE;
-		break;
-	case 0x46: /* G72 */
-	case 0x47: /* G70 */
-	case 0x49: /* G71 */
-	case 0x4b: /* G73 */
-	case 0x4c: /* C51 (G7X version) */
-		pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
-		break;
-	default:
-		pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
-		break;
-	}
-
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_tile_region(dev, i);
-
-	return 0;
-}
-
-void
-nv40_fb_takedown(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
deleted file mode 100644
index cf952d2048ed..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-
-static struct ramfc_desc {
-	unsigned bits:6;
-	unsigned ctxs:5;
-	unsigned ctxp:8;
-	unsigned regs:5;
-	unsigned regp;
-} nv40_ramfc[] = {
-	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
-	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
-	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
-	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
-	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
-	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_STATE },
-	{ 28,  0, 0x18,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
-	{  2, 28, 0x18, 28, 0x002058 },
-	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_ENGINE },
-	{ 32,  0, 0x20,  0, NV04_PFIFO_CACHE1_PULL1 },
-	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
-	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
-	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
-	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
-	{ 32,  0, 0x34,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
-	{ 32,  0, 0x38,  0, NV40_PFIFO_GRCTX_INSTANCE },
-	{ 17,  0, 0x3c,  0, NV04_PFIFO_DMA_TIMESLICE },
-	{ 32,  0, 0x40,  0, 0x0032e4 },
-	{ 32,  0, 0x44,  0, 0x0032e8 },
-	{ 32,  0, 0x4c,  0, 0x002088 },
-	{ 32,  0, 0x50,  0, 0x003300 },
-	{ 32,  0, 0x54,  0, 0x00330c },
-	{}
-};
-
-struct nv40_fifo_priv {
-	struct nouveau_fifo_priv base;
-	struct ramfc_desc *ramfc_desc;
-};
-
-struct nv40_fifo_chan {
-	struct nouveau_fifo_chan base;
-	struct nouveau_gpuobj *ramfc;
-};
-
-static int
-nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv40_fifo_priv *priv = nv_engine(dev, engine);
-	struct nv40_fifo_chan *fctx;
-	unsigned long flags;
-	int ret;
-
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-
-	/* map channel control registers */
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			     NV03_USER(chan->id), PAGE_SIZE);
-	if (!chan->user) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	/* initialise default fifo context */
-	ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
-				      chan->id * 128, ~0, 128,
-				      NVOBJ_FLAG_ZERO_ALLOC |
-				      NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
-	if (ret)
-		goto error;
-
-	nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
-	nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
-	nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
-	nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
-				   NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-				   NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-				   NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-				   NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
-
-	/* enable dma mode on the channel */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/*XXX: remove this later, need fifo engine context commit hook */
-	nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
-
-error:
-	if (ret)
-		priv->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static int
-nv40_fifo_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv40_fifo_priv *priv = nv_engine(dev, engine);
-	int i;
-
-	nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
-	nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-
-	nv_wr32(dev, 0x002040, 0x000000ff);
-	nv_wr32(dev, 0x002044, 0x2101ffff);
-	nv_wr32(dev, 0x002058, 0x00000001);
-
-	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((dev_priv->ramht->bits - 9) << 16) |
-				       (dev_priv->ramht->gpuobj->pinst >> 8));
-	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
-
-	switch (dev_priv->chipset) {
-	case 0x47:
-	case 0x49:
-	case 0x4b:
-		nv_wr32(dev, 0x002230, 0x00000001);
-	case 0x40:
-	case 0x41:
-	case 0x42:
-	case 0x43:
-	case 0x45:
-	case 0x48:
-		nv_wr32(dev, 0x002220, 0x00030002);
-		break;
-	default:
-		nv_wr32(dev, 0x002230, 0x00000000);
-		nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
-					 dev_priv->ramfc->pinst) >> 16) |
-				       0x00030000);
-		break;
-	}
-
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
-
-	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-
-	for (i = 0; i < priv->base.channels; i++) {
-		if (dev_priv->channels.ptr[i])
-			nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
-	}
-
-	return 0;
-}
-
-int
-nv40_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv40_fifo_priv *priv;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.base.destroy = nv04_fifo_destroy;
-	priv->base.base.init = nv40_fifo_init;
-	priv->base.base.fini = nv04_fifo_fini;
-	priv->base.base.context_new = nv40_fifo_context_new;
-	priv->base.base.context_del = nv04_fifo_context_del;
-	priv->base.channels = 31;
-	priv->ramfc_desc = nv40_ramfc;
-	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
deleted file mode 100644
index 5489201bec0b..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * Copyright (C) 2007 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-
-struct nv40_graph_engine {
-	struct nouveau_exec_engine base;
-	u32 grctx_size;
-};
-
-static int
-nv40_graph_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *grctx = NULL;
-	unsigned long flags;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
-	if (ret)
-		return ret;
-
-	/* Initialise default context values */
-	nv40_grctx_fill(dev, grctx);
-	nv_wo32(grctx, 0, grctx->vinst);
-
-	/* init grctx pointer in ramfc, and on PFIFO if channel is
-	 * already active there
-	 */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
-	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
-		nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	chan->engctx[engine] = grctx;
-	return 0;
-}
-
-static void
-nv40_graph_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nouveau_gpuobj *grctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 inst = 0x01000000 | (grctx->pinst >> 4);
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
-	if (nv_rd32(dev, 0x40032c) == inst)
-		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
-	if (nv_rd32(dev, 0x400330) == inst)
-		nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
-	nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* Free the context resources */
-	nouveau_gpuobj_ref(NULL, &grctx);
-	chan->engctx[engine] = NULL;
-}
-
-int
-nv40_graph_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 1;
-	obj->class  = class;
-
-	nv_wo32(obj, 0x00, class);
-	nv_wo32(obj, 0x04, 0x00000000);
-#ifndef __BIG_ENDIAN
-	nv_wo32(obj, 0x08, 0x00000000);
-#else
-	nv_wo32(obj, 0x08, 0x01000000);
-#endif
-	nv_wo32(obj, 0x0c, 0x00000000);
-	nv_wo32(obj, 0x10, 0x00000000);
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static void
-nv40_graph_set_tile_region(struct drm_device *dev, int i)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-	switch (dev_priv->chipset) {
-	case 0x40:
-	case 0x41: /* guess */
-	case 0x42:
-	case 0x43:
-	case 0x45: /* guess */
-	case 0x4e:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
-		break;
-	case 0x44:
-	case 0x4a:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-		break;
-	case 0x46:
-	case 0x47:
-	case 0x49:
-	case 0x4b:
-	case 0x4c:
-	case 0x67:
-	default:
-		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
-		break;
-	}
-}
-
-/*
- * G70		0x47
- * G71		0x49
- * NV45		0x48
- * G72[M]	0x46
- * G73		0x4b
- * C51_G7X	0x4c
- * C51		0x4e
- */
-int
-nv40_graph_init(struct drm_device *dev, int engine)
-{
-	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	uint32_t vramsz;
-	int i, j;
-
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
-			~NV_PMC_ENABLE_PGRAPH);
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
-			 NV_PMC_ENABLE_PGRAPH);
-
-	/* generate and upload context program */
-	nv40_grctx_init(dev, &pgraph->grctx_size);
-
-	/* No context present currently */
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
-
-	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
-	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
-	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
-	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
-
-	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-
-	j = nv_rd32(dev, 0x1540) & 0xff;
-	if (j) {
-		for (i = 0; !(j & 1); j >>= 1, i++)
-			;
-		nv_wr32(dev, 0x405000, i);
-	}
-
-	if (dev_priv->chipset == 0x40) {
-		nv_wr32(dev, 0x4009b0, 0x83280fff);
-		nv_wr32(dev, 0x4009b4, 0x000000a0);
-	} else {
-		nv_wr32(dev, 0x400820, 0x83280eff);
-		nv_wr32(dev, 0x400824, 0x000000a0);
-	}
-
-	switch (dev_priv->chipset) {
-	case 0x40:
-	case 0x45:
-		nv_wr32(dev, 0x4009b8, 0x0078e366);
-		nv_wr32(dev, 0x4009bc, 0x0000014c);
-		break;
-	case 0x41:
-	case 0x42: /* pciid also 0x00Cx */
-	/* case 0x0120: XXX (pciid) */
-		nv_wr32(dev, 0x400828, 0x007596ff);
-		nv_wr32(dev, 0x40082c, 0x00000108);
-		break;
-	case 0x43:
-		nv_wr32(dev, 0x400828, 0x0072cb77);
-		nv_wr32(dev, 0x40082c, 0x00000108);
-		break;
-	case 0x44:
-	case 0x46: /* G72 */
-	case 0x4a:
-	case 0x4c: /* G7x-based C51 */
-	case 0x4e:
-		nv_wr32(dev, 0x400860, 0);
-		nv_wr32(dev, 0x400864, 0);
-		break;
-	case 0x47: /* G70 */
-	case 0x49: /* G71 */
-	case 0x4b: /* G73 */
-		nv_wr32(dev, 0x400828, 0x07830610);
-		nv_wr32(dev, 0x40082c, 0x0000016A);
-		break;
-	default:
-		break;
-	}
-
-	nv_wr32(dev, 0x400b38, 0x2ffff800);
-	nv_wr32(dev, 0x400b3c, 0x00006000);
-
-	/* Tiling related stuff. */
-	switch (dev_priv->chipset) {
-	case 0x44:
-	case 0x4a:
-		nv_wr32(dev, 0x400bc4, 0x1003d888);
-		nv_wr32(dev, 0x400bbc, 0xb7a7b500);
-		break;
-	case 0x46:
-		nv_wr32(dev, 0x400bc4, 0x0000e024);
-		nv_wr32(dev, 0x400bbc, 0xb7a7b520);
-		break;
-	case 0x4c:
-	case 0x4e:
-	case 0x67:
-		nv_wr32(dev, 0x400bc4, 0x1003d888);
-		nv_wr32(dev, 0x400bbc, 0xb7a7b540);
-		break;
-	default:
-		break;
-	}
-
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->num_tiles; i++)
-		nv40_graph_set_tile_region(dev, i);
-
-	/* begin RAM config */
-	vramsz = pci_resource_len(dev->pdev, 0) - 1;
-	switch (dev_priv->chipset) {
-	case 0x40:
-		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
-		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
-		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
-		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
-		nv_wr32(dev, 0x400820, 0);
-		nv_wr32(dev, 0x400824, 0);
-		nv_wr32(dev, 0x400864, vramsz);
-		nv_wr32(dev, 0x400868, vramsz);
-		break;
-	default:
-		switch (dev_priv->chipset) {
-		case 0x41:
-		case 0x42:
-		case 0x43:
-		case 0x45:
-		case 0x4e:
-		case 0x44:
-		case 0x4a:
-			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
-			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
-			break;
-		default:
-			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
-			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
-			break;
-		}
-		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
-		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
-		nv_wr32(dev, 0x400840, 0);
-		nv_wr32(dev, 0x400844, 0);
-		nv_wr32(dev, 0x4008A0, vramsz);
-		nv_wr32(dev, 0x4008A4, vramsz);
-		break;
-	}
-
-	return 0;
-}
-
-static int
-nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	u32 inst = nv_rd32(dev, 0x40032c);
-	if (inst & 0x01000000) {
-		nv_wr32(dev, 0x400720, 0x00000000);
-		nv_wr32(dev, 0x400784, inst);
-		nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
-		nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
-		if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
-			u32 insn = nv_rd32(dev, 0x400308);
-			NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
-		}
-		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
-	}
-	return 0;
-}
-
-static int
-nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *grctx;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < pfifo->channels; i++) {
-		if (!dev_priv->channels.ptr[i])
-			continue;
-		grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
-
-		if (grctx && grctx->pinst == inst)
-			break;
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return i;
-}
-
-static void
-nv40_graph_isr(struct drm_device *dev)
-{
-	u32 stat;
-
-	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-		u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
-		u32 chid = nv40_graph_isr_chid(dev, inst);
-		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-		u32 subc = (addr & 0x00070000) >> 16;
-		u32 mthd = (addr & 0x00001ffc);
-		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
-		u32 show = stat;
-
-		if (stat & NV_PGRAPH_INTR_ERROR) {
-			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
-					show &= ~NV_PGRAPH_INTR_ERROR;
-			} else
-			if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
-				nv_mask(dev, 0x402000, 0, 0);
-			}
-		}
-
-		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
-		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
-		if (show && nouveau_ratelimit()) {
-			NV_INFO(dev, "PGRAPH -");
-			nouveau_bitfield_print(nv10_graph_intr, show);
-			printk(" nsource:");
-			nouveau_bitfield_print(nv04_graph_nsource, nsource);
-			printk(" nstatus:");
-			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
-				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
-				chid, inst, subc, class, mthd, data);
-		}
-	}
-}
-
-static void
-nv40_graph_destroy(struct drm_device *dev, int engine)
-{
-	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 12);
-
-	NVOBJ_ENGINE_DEL(dev, GR);
-	kfree(pgraph);
-}
-
-int
-nv40_graph_create(struct drm_device *dev)
-{
-	struct nv40_graph_engine *pgraph;
-
-	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
-	if (!pgraph)
-		return -ENOMEM;
-
-	pgraph->base.destroy = nv40_graph_destroy;
-	pgraph->base.init = nv40_graph_init;
-	pgraph->base.fini = nv40_graph_fini;
-	pgraph->base.context_new = nv40_graph_context_new;
-	pgraph->base.context_del = nv40_graph_context_del;
-	pgraph->base.object_new = nv40_graph_object_new;
-	pgraph->base.set_tile_region = nv40_graph_set_tile_region;
-
-	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
-	nouveau_irq_register(dev, 12, nv40_graph_isr);
-
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
-	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
-	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
-
-	/* curie */
-	if (nv44_graph_class(dev))
-		NVOBJ_CLASS(dev, 0x4497, GR);
-	else
-		NVOBJ_CLASS(dev, 0x4097, GR);
-
-	return 0;
-}
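
As a side note, the interrupt handler deleted above recovers the faulting channel and method purely by bit-slicing a few registers; a minimal sketch of that decode, with made-up register values, is shown below.

/* Sketch only: the bitfield decode nv40_graph_isr() performs on the trapped
 * address and current-context registers, as a standalone program with
 * made-up input values.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t trapped_addr = 0x00031404;  /* example NV04_PGRAPH_TRAPPED_ADDR */
	uint32_t ctx_reg      = 0x00012345;  /* example 0x40032c value */

	unsigned subc = (trapped_addr & 0x00070000) >> 16;
	unsigned mthd = (trapped_addr & 0x00001ffc);
	unsigned inst = (ctx_reg & 0x000fffff) << 4;

	printf("subc %u mthd 0x%04x inst 0x%08x\n", subc, mthd, inst);
	return 0;
}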
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
deleted file mode 100644
index 788584364853..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ /dev/null
@@ -1,27 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-int
-nv40_mc_init(struct drm_device *dev)
-{
-	/* Power up everything; individual units will be reset later
-	 * if needed.
-	 */
-	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
-
-	if (nv44_graph_class(dev)) {
-		u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
-		nv_wr32(dev, NV40_PMC_1700, tmp);
-		nv_wr32(dev, NV40_PMC_1704, 0);
-		nv_wr32(dev, NV40_PMC_1708, 0);
-		nv_wr32(dev, NV40_PMC_170C, tmp);
-	}
-
-	return 0;
-}
-
-void
-nv40_mc_takedown(struct drm_device *dev)
-{
-}
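
The deleted MC code simply powers everything up; the FIFO/graph init paths above then reset individual units by pulsing their PMC_ENABLE bit. A minimal sketch of that pulse pattern against a simulated register follows; the accessor name and the enable bit value are stand-ins, not nouveau API.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_pmc_enable;                 /* simulated NV03_PMC_ENABLE */

static uint32_t
mmio_mask32(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = *reg;

	*reg = (tmp & ~mask) | val;
	return tmp;
}

int
main(void)
{
	const uint32_t PFIFO_BIT = 0x00000100;   /* placeholder enable bit */

	fake_pmc_enable = 0xffffffff;            /* "power up everything" */
	mmio_mask32(&fake_pmc_enable, PFIFO_BIT, 0);          /* hold in reset */
	mmio_mask32(&fake_pmc_enable, PFIFO_BIT, PFIFO_BIT);  /* release again */
	printf("PMC_ENABLE = 0x%08x\n", (unsigned)fake_pmc_enable);
	return 0;
}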
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index b94dd87d592c..3382064c7f33 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -23,18 +23,24 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
 #include "nouveau_hw.h"
-#include "nouveau_fifo.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
 
 #define min2(a,b) ((a) < (b) ? (a) : (b))
 
 static u32
 read_pll_1(struct drm_device *dev, u32 reg)
 {
-	u32 ctrl = nv_rd32(dev, reg + 0x00);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, reg + 0x00);
 	int P = (ctrl & 0x00070000) >> 16;
 	int N = (ctrl & 0x0000ff00) >> 8;
 	int M = (ctrl & 0x000000ff) >> 0;
@@ -49,8 +55,9 @@ read_pll_1(struct drm_device *dev, u32 reg)
 static u32
 read_pll_2(struct drm_device *dev, u32 reg)
 {
-	u32 ctrl = nv_rd32(dev, reg + 0x00);
-	u32 coef = nv_rd32(dev, reg + 0x04);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, reg + 0x00);
+	u32 coef = nv_rd32(device, reg + 0x04);
 	int N2 = (coef & 0xff000000) >> 24;
 	int M2 = (coef & 0x00ff0000) >> 16;
 	int N1 = (coef & 0x0000ff00) >> 8;
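
The two read_pll helpers above differ only in how many N/M stages they decode; how such coefficients typically combine into an output frequency is sketched below. The 27 MHz reference and the example coefficients are assumptions for illustration, not values read from the VBIOS.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: ref * N1/M1, optionally * N2/M2, then divided by 2^P. */
static uint32_t
pll_freq_khz(uint32_t ref_khz, int N1, int M1, int N2, int M2, int P)
{
	uint64_t clk = (uint64_t)ref_khz * N1 / M1;

	if (N2 && M2)                    /* second stage populated */
		clk = clk * N2 / M2;

	return (uint32_t)(clk >> P);
}

int
main(void)
{
	/* 27000 kHz * 100/3 >> 1 = 450000 kHz */
	printf("%u kHz\n", (unsigned)pll_freq_khz(27000, 100, 3, 0, 0, 1));
	return 0;
}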
@@ -89,7 +96,8 @@ read_clk(struct drm_device *dev, u32 src)
 int
 nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	u32 ctrl = nv_rd32(dev, 0x00c040);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, 0x00c040);
 
 	perflvl->core   = read_clk(dev, (ctrl & 0x00000003) >> 0);
 	perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
@@ -107,27 +115,30 @@ struct nv40_pm_state {
 };
 
 static int
-nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
 	      u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
 	struct nouveau_pll_vals coef;
 	int ret;
 
-	ret = get_pll_limits(dev, reg, pll);
+	ret = nvbios_pll_parse(bios, reg, pll);
 	if (ret)
 		return ret;
 
-	if (clk < pll->vco1.maxfreq)
-		pll->vco2.maxfreq = 0;
+	if (clk < pll->vco1.max_freq)
+		pll->vco2.max_freq = 0;
 
-	ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+	ret = pclk->pll_calc(pclk, pll, clk, &coef);
 	if (ret == 0)
 		return -ERANGE;
 
 	*N1 = coef.N1;
 	*M1 = coef.M1;
 	if (N2 && M2) {
-		if (pll->vco2.maxfreq) {
+		if (pll->vco2.max_freq) {
 			*N2 = coef.N2;
 			*M2 = coef.M2;
 		} else {
@@ -143,7 +154,7 @@ void *
 nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
 	struct nv40_pm_state *info;
-	struct pll_lims pll;
+	struct nvbios_pll pll;
 	int N1, N2, M1, M2, log2P;
 	int ret;
 
@@ -191,7 +202,7 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 		goto out;
 
 	info->mpll_ctrl  = 0x80000000 | (log2P << 16);
-	info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20;
+	info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
 	if (N2 == M2) {
 		info->mpll_ctrl |= 0x00000100;
 		info->mpll_coef  = (N1 << 8) | M1;
@@ -212,12 +223,13 @@ static bool
 nv40_pm_gr_idle(void *data)
 {
 	struct drm_device *dev = data;
+	struct nouveau_device *device = nouveau_dev(dev);
 
-	if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 !=
-	    (nv_rd32(dev, 0x400760) & 0x0000000f))
+	if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
+	    (nv_rd32(device, 0x400760) & 0x0000000f))
 		return false;
 
-	if (nv_rd32(dev, 0x400700))
+	if (nv_rd32(device, 0x400700))
 		return false;
 
 	return true;
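
The idle test above is meant to be polled via nv_wait_cb(); a minimal sketch of that poll-a-predicate pattern is shown below, bounded by a retry counter rather than the timer subdev the real helper uses.

#include <stdbool.h>
#include <stdio.h>

static bool
wait_cb(bool (*cond)(void *), void *data, unsigned retries)
{
	while (retries--) {
		if (cond(data))
			return true;
	}
	return false;                    /* caller treats this as a timeout */
}

static unsigned fake_counter;

static bool
gr_idle(void *data)
{
	(void)data;
	/* stands in for comparing the get/put nibbles of register 0x400760 */
	return ++fake_counter > 3;
}

int
main(void)
{
	printf("idle: %d\n", wait_cb(gr_idle, NULL, 1000));
	return 0;
}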
@@ -226,7 +238,9 @@ nv40_pm_gr_idle(void *data)
 int
 nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fifo *pfifo = nouveau_fifo(device);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv40_pm_state *info = pre_state;
 	unsigned long flags;
 	struct bit_entry M;
@@ -236,12 +250,12 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 
 	/* determine which CRTCs are active, fetch VGA_SR1 for each */
 	for (i = 0; i < 2; i++) {
-		u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000));
+		u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
 		u32 cnt = 0;
 		do {
-			if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) {
-				nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
-				sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000));
+			if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
+				nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+				sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
 				if (!(sr1[i] & 0x20))
 					crtc_mask |= (1 << i);
 				break;
@@ -251,28 +265,20 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 	}
 
 	/* halt and idle engines */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
-	if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000))
-		goto resume;
-	nv_mask(dev, 0x003220, 0x00000001, 0x00000000);
-	if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
-		goto resume;
-	nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
-	nv04_fifo_cache_pull(dev, false);
+	pfifo->pause(pfifo, &flags);
 
-	if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
+	if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
 		goto resume;
 
 	ret = 0;
 
 	/* set engine clocks */
-	nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
-	nv_wr32(dev, 0x004004, info->npll_coef);
-	nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl);
-	nv_mask(dev, 0x004008, 0xc007ffff, info->spll);
+	nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
+	nv_wr32(device, 0x004004, info->npll_coef);
+	nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
+	nv_mask(device, 0x004008, 0xc007ffff, info->spll);
 	mdelay(5);
-	nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
+	nv_mask(device, 0x00c040, 0x00000333, info->ctrl);
 
 	if (!info->mpll_ctrl)
 		goto resume;
@@ -281,52 +287,52 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
-		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
 	}
 
 	/* prepare ram for reclocking */
-	nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */
-	nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
-	nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
-	nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
-	nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */
+	nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
+	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+	nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
 
 	/* change the PLL of each memory partition */
-	nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000);
-	switch (dev_priv->chipset) {
+	nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
+	switch (nv_device(drm->device)->chipset) {
 	case 0x40:
 	case 0x45:
 	case 0x41:
 	case 0x42:
 	case 0x47:
-		nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl);
-		nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(dev, 0x004048, info->mpll_coef);
-		nv_wr32(dev, 0x004030, info->mpll_coef);
+		nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
+		nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x004048, info->mpll_coef);
+		nv_wr32(device, 0x004030, info->mpll_coef);
 	case 0x43:
 	case 0x49:
 	case 0x4b:
-		nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(dev, 0x00403c, info->mpll_coef);
+		nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x00403c, info->mpll_coef);
 	default:
-		nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(dev, 0x004024, info->mpll_coef);
+		nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x004024, info->mpll_coef);
 		break;
 	}
 	udelay(100);
-	nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000);
+	nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
 
 	/* re-enable normal operation of memory controller */
-	nv_wr32(dev, 0x1002dc, 0x00000000);
-	nv_mask(dev, 0x100210, 0x80000000, 0x80000000);
+	nv_wr32(device, 0x1002dc, 0x00000000);
+	nv_mask(device, 0x100210, 0x80000000, 0x80000000);
 	udelay(100);
 
 	/* execute memory reset script from vbios */
 	if (!bit_table(dev, 'M', &M))
-		nouveau_bios_init_exec(dev, ROM16(M.data[0]));
+		nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);
 
 	/* make sure we're in vblank (hopefully the same one as before), and
 	 * then re-enable crtc memory access
@@ -334,62 +340,14 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]);
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
 	}
 
 	/* resume engines */
 resume:
-	nv_wr32(dev, 0x003250, 0x00000001);
-	nv_mask(dev, 0x003220, 0x00000001, 0x00000001);
-	nv_wr32(dev, 0x003200, 0x00000001);
-	nv_wr32(dev, 0x002500, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
+	pfifo->start(pfifo, &flags);
 	kfree(info);
 	return ret;
 }
-
-int
-nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
-{
-	if (line == 2) {
-		u32 reg = nv_rd32(dev, 0x0010f0);
-		if (reg & 0x80000000) {
-			*duty = (reg & 0x7fff0000) >> 16;
-			*divs = (reg & 0x00007fff);
-			return 0;
-		}
-	} else
-	if (line == 9) {
-		u32 reg = nv_rd32(dev, 0x0015f4);
-		if (reg & 0x80000000) {
-			*divs = nv_rd32(dev, 0x0015f8);
-			*duty = (reg & 0x7fffffff);
-			return 0;
-		}
-	} else {
-		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
-		return -ENODEV;
-	}
-
-	return -EINVAL;
-}
-
-int
-nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
-{
-	if (line == 2) {
-		nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
-	} else
-	if (line == 9) {
-		nv_wr32(dev, 0x0015f8, divs);
-		nv_wr32(dev, 0x0015f4, duty | 0x80000000);
-	} else {
-		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
-		return -ENODEV;
-	}
-
-	return 0;
-}
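
The deleted PWM helpers above pack an enable bit, a duty value and a divider into one register for GPIO line 2; a minimal sketch of that packing follows, with an arbitrary divider chosen purely for the example.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: enable in bit 31, duty in bits 30:16, divider in bits 14:0,
 * mirroring the deleted nv40_pm_pwm_set() path for line 2.
 */
static uint32_t
pwm_line2_reg(uint32_t divs, unsigned duty_percent)
{
	uint32_t duty = divs * duty_percent / 100;

	return 0x80000000 | (duty << 16) | divs;
}

int
main(void)
{
	printf("0x0010f0 <- 0x%08x\n", (unsigned)pwm_line2_reg(0x0600, 40));
	return 0;
}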
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 7f3ae75032d6..222de77d6269 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -27,24 +27,27 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
 #include "nouveau_reg.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_gem.h"
 #include "nouveau_hw.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
-#include "nouveau_fb.h"
 #include "nouveau_connector.h"
 #include "nv50_display.h"
 
+#include <subdev/clock.h>
+
 static void
 nv50_crtc_lut_load(struct drm_crtc *crtc)
 {
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
 	int i;
 
-	NV_DEBUG_KMS(crtc->dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	for (i = 0; i < 256; i++) {
 		writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
@@ -63,25 +66,25 @@ int
 nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int index = nv_crtc->index, ret;
 
-	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
-	NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");
+	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
+	NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
 
 	if (blanked) {
 		nv_crtc->cursor.hide(nv_crtc, false);
 
-		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
+		ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
 		if (ret) {
-			NV_ERROR(dev, "no space while blanking crtc\n");
+			NV_ERROR(drm, "no space while blanking crtc\n");
 			return ret;
 		}
 		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
 		OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
 		OUT_RING(evo, 0);
-		if (dev_priv->chipset != 0x50) {
+		if (nv_device(drm->device)->chipset != 0x50) {
 			BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
 			OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
 		}
@@ -94,9 +97,9 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 		else
 			nv_crtc->cursor.hide(nv_crtc, false);
 
-		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
+		ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
 		if (ret) {
-			NV_ERROR(dev, "no space while unblanking crtc\n");
+			NV_ERROR(drm, "no space while unblanking crtc\n");
 			return ret;
 		}
 		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
@@ -104,7 +107,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 				NV50_EVO_CRTC_CLUT_MODE_OFF :
 				NV50_EVO_CRTC_CLUT_MODE_ON);
 		OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
-		if (dev_priv->chipset != 0x50) {
+		if (nv_device(drm->device)->chipset != 0x50) {
 			BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
 			OUT_RING(evo, NvEvoVRAM);
 		}
@@ -113,7 +116,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 		OUT_RING(evo, nv_crtc->fb.offset >> 8);
 		OUT_RING(evo, 0);
 		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
-		if (dev_priv->chipset != 0x50)
+		if (nv_device(drm->device)->chipset != 0x50)
 			if (nv_crtc->fb.tile_flags == 0x7a00 ||
 			    nv_crtc->fb.tile_flags == 0xfe00)
 				OUT_RING(evo, NvEvoFB32);
@@ -173,17 +176,18 @@ static int
 nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
 	int adj;
 	u32 hue, vib;
 
-	NV_DEBUG_KMS(dev, "vibrance = %i, hue = %i\n",
+	NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
 		     nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
 
 	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
 	if (ret) {
-		NV_ERROR(dev, "no space while setting color vibrance\n");
+		NV_ERROR(drm, "no space while setting color vibrance\n");
 		return ret;
 	}
 
@@ -228,17 +232,18 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 	struct nouveau_connector *nv_connector;
 	struct drm_crtc *crtc = &nv_crtc->base;
 	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	struct drm_display_mode *umode = &crtc->mode;
 	struct drm_display_mode *omode;
 	int scaling_mode, ret;
 	u32 ctrl = 0, oX, oY;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	nv_connector = nouveau_crtc_connector_get(nv_crtc);
 	if (!nv_connector || !nv_connector->native_mode) {
-		NV_ERROR(dev, "no native mode, forcing panel scaling\n");
+		NV_ERROR(drm, "no native mode, forcing panel scaling\n");
 		scaling_mode = DRM_MODE_SCALE_NONE;
 	} else {
 		scaling_mode = nv_connector->scaling_mode;
@@ -328,63 +333,19 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 int
 nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct pll_lims pll;
-	uint32_t reg1, reg2;
-	int ret, N1, M1, N2, M2, P;
-
-	ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
-	if (ret)
-		return ret;
-
-	if (pll.vco2.maxfreq) {
-		ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
-		if (ret <= 0)
-			return 0;
-
-		NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
-			 pclk, ret, N1, M1, N2, M2, P);
-
-		reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00;
-		reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00;
-		nv_wr32(dev, pll.reg + 0, 0x10000611);
-		nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);
-		nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
-	} else
-	if (dev_priv->chipset < NV_C0) {
-		ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
-		if (ret <= 0)
-			return 0;
-
-		NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
-			 pclk, ret, N1, N2, M1, P);
-
-		reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000;
-		nv_wr32(dev, pll.reg + 0, 0x50000610);
-		nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
-		nv_wr32(dev, pll.reg + 8, N2);
-	} else {
-		ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
-		if (ret <= 0)
-			return 0;
-
-		NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
-			 pclk, ret, N1, N2, M1, P);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_clock *clk = nouveau_clock(device);
 
-		nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100);
-		nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1);
-		nv_wr32(dev, pll.reg + 0x10, N2 << 16);
-	}
-
-	return 0;
+	return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
 }
 
 static void
 nv50_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 
-	NV_DEBUG_KMS(crtc->dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	nouveau_bo_unmap(nv_crtc->lut.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
@@ -473,13 +434,15 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
 static void
 nv50_crtc_save(struct drm_crtc *crtc)
 {
-	NV_ERROR(crtc->dev, "!!\n");
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	NV_ERROR(drm, "!!\n");
 }
 
 static void
 nv50_crtc_restore(struct drm_crtc *crtc)
 {
-	NV_ERROR(crtc->dev, "!!\n");
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	NV_ERROR(drm, "!!\n");
 }
 
 static const struct drm_crtc_funcs nv50_crtc_funcs = {
@@ -503,8 +466,9 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
 	nv50_display_flip_stop(crtc);
 	drm_vblank_pre_modeset(dev, nv_crtc->index);
@@ -515,9 +479,10 @@ static void
 nv50_crtc_commit(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
-	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
 	nv50_crtc_blank(nv_crtc, false);
 	drm_vblank_post_modeset(dev, nv_crtc->index);
@@ -539,17 +504,17 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	struct drm_framebuffer *drm_fb;
 	struct nouveau_framebuffer *fb;
 	int ret;
 
-	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
 	/* no fb bound */
 	if (!atomic && !crtc->fb) {
-		NV_DEBUG_KMS(dev, "No FB bound\n");
+		NV_DEBUG(drm, "No FB bound\n");
 		return 0;
 	}
 
@@ -579,7 +544,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	nv_crtc->fb.offset = fb->nvbo->bo.offset;
 	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
 	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
-	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
+	if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
 		ret = RING_SPACE(evo, 2);
 		if (ret)
 			return ret;
@@ -737,10 +702,11 @@ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
 int
 nv50_crtc_create(struct drm_device *dev, int index)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = NULL;
 	int ret, i;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
 	if (!nv_crtc)
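
Nearly every function in this file follows the same EVO submission pattern: reserve space, emit a method header, emit data words, fire the ring. A minimal standalone sketch of that pattern is given below, modelled on a plain array; the header encoding and the method number are simplifications, not the real EVO format.

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t buf[64];
	unsigned cur, max;
};

static int
ring_space(struct ring *r, unsigned dwords)
{
	return (r->max - r->cur >= dwords) ? 0 : -16 /* -EBUSY */;
}

static void
ring_begin(struct ring *r, uint32_t mthd, unsigned count)
{
	r->buf[r->cur++] = (count << 18) | mthd;   /* simplified header */
}

static void
ring_out(struct ring *r, uint32_t data)
{
	r->buf[r->cur++] = data;
}

int
main(void)
{
	struct ring evo = { .cur = 0, .max = 64 };

	if (ring_space(&evo, 3))
		return 1;                          /* "no space while ..." path */
	ring_begin(&evo, 0x0840, 2);               /* hypothetical CLUT_MODE method */
	ring_out(&evo, 0x00000000);
	ring_out(&evo, 0x00000000);
	printf("emitted %u dwords\n", evo.cur);
	return 0;
}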
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index b290b7b1f65d..223da113ceee 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -26,9 +26,8 @@
 
 #include <drm/drmP.h>
 
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
 #include "nouveau_crtc.h"
 #include "nv50_display.h"
 
@@ -36,22 +35,22 @@ static void
 nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	if (update && nv_crtc->cursor.visible)
 		return;
 
-	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
+	ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
 	if (ret) {
-		NV_ERROR(dev, "no space while unhiding cursor\n");
+		NV_ERROR(drm, "no space while unhiding cursor\n");
 		return;
 	}
 
-	if (dev_priv->chipset != 0x50) {
+	if (nv_device(drm->device)->chipset != 0x50) {
 		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
 		OUT_RING(evo, NvEvoVRAM);
 	}
@@ -71,24 +70,24 @@ static void
 nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	if (update && !nv_crtc->cursor.visible)
 		return;
 
-	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
+	ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
 	if (ret) {
-		NV_ERROR(dev, "no space while hiding cursor\n");
+		NV_ERROR(drm, "no space while hiding cursor\n");
 		return;
 	}
 	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
 	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
 	OUT_RING(evo, 0);
-	if (dev_priv->chipset != 0x50) {
+	if (nv_device(drm->device)->chipset != 0x50) {
 		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
 		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
 	}
@@ -104,19 +103,18 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
 static void
 nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
 {
-	struct drm_device *dev = nv_crtc->base.dev;
+	struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
 
 	nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
-	nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
+	nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
 		((y & 0xFFFF) << 16) | (x & 0xFFFF));
 	/* Needed to make the cursor move. */
-	nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
+	nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
 }
 
 static void
 nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
 {
-	NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
 	if (offset == nv_crtc->cursor.offset)
 		return;
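
nv50_cursor_set_pos() above packs both coordinates into one register write; a standalone sketch of that packing:

#include <stdint.h>
#include <stdio.h>

static uint32_t
cursor_pos(int x, int y)
{
	/* y in the high half-word, x in the low one, as in set_pos() above */
	return ((uint32_t)(y & 0xffff) << 16) | (uint32_t)(x & 0xffff);
}

int
main(void)
{
	printf("POS = 0x%08x\n", (unsigned)cursor_pos(640, 400)); /* 0x01900280 */
	return 0;
}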
 
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 2bff2e588d87..6a30a1748573 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -29,18 +29,21 @@
 
 #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
 #include "nouveau_reg.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
 #include "nv50_display.h"
 
+#include <subdev/timer.h>
+
 static void
 nv50_dac_disconnect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
 
@@ -48,11 +51,11 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
 		return;
 	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
 
-	NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
+	NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
 
 	ret = RING_SPACE(evo, 4);
 	if (ret) {
-		NV_ERROR(dev, "no space while disconnecting DAC\n");
+		NV_ERROR(drm, "no space while disconnecting DAC\n");
 		return;
 	}
 	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
@@ -67,43 +70,43 @@ static enum drm_connector_status
 nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	enum drm_connector_status status = connector_status_disconnected;
 	uint32_t dpms_state, load_pattern, load_state;
 	int or = nv_encoder->or;
 
-	nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
-	dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
+	nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
+	dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
 
-	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
 		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-	if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+	if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
 		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
-		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
-		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
-			  nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
+		NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
+		NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
+			  nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
 		return status;
 	}
 
 	/* Use the BIOS-provided value if possible. */
-	if (dev_priv->vbios.dactestval) {
-		load_pattern = dev_priv->vbios.dactestval;
-		NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
+	if (drm->vbios.dactestval) {
+		load_pattern = drm->vbios.dactestval;
+		NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
 			  load_pattern);
 	} else {
 		load_pattern = 340;
-		NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
+		NV_DEBUG(drm, "Using default load_pattern of %d\n",
 			 load_pattern);
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
+	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
 		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
 	mdelay(45); /* give it some time to process */
-	load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
+	load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
 
-	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
-	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
+	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
+	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
 		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
 
 	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
@@ -111,9 +114,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 		status = connector_status_connected;
 
 	if (status == connector_status_connected)
-		NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
+		NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
 	else
-		NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);
+		NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
 
 	return status;
 }
@@ -121,23 +124,24 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 static void
 nv50_dac_dpms(struct drm_encoder *encoder, int mode)
 {
-	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	uint32_t val;
 	int or = nv_encoder->or;
 
-	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
+	NV_DEBUG(drm, "or %d mode %d\n", or, mode);
 
 	/* wait for it to be done */
-	if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+	if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
 		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
-		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
-		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
-			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
+		NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
+		NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
+			 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
 		return;
 	}
 
-	val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
+	val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
 
 	if (mode != DRM_MODE_DPMS_ON)
 		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
@@ -158,20 +162,22 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
 		break;
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
+	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
 		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
 }
 
 static void
 nv50_dac_save(struct drm_encoder *encoder)
 {
-	NV_ERROR(encoder->dev, "!!\n");
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	NV_ERROR(drm, "!!\n");
 }
 
 static void
 nv50_dac_restore(struct drm_encoder *encoder)
 {
-	NV_ERROR(encoder->dev, "!!\n");
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	NV_ERROR(drm, "!!\n");
 }
 
 static bool
@@ -179,14 +185,15 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder,
 		    const struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_connector *connector;
 
-	NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
+	NV_DEBUG(drm, "or %d\n", nv_encoder->or);
 
 	connector = nouveau_encoder_connector_get(nv_encoder);
 	if (!connector) {
-		NV_ERROR(encoder->dev, "Encoder has no connector\n");
+		NV_ERROR(drm, "Encoder has no connector\n");
 		return false;
 	}
 
@@ -207,13 +214,14 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
 	uint32_t mode_ctl = 0, mode_ctl2 = 0;
 	int ret;
 
-	NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n",
+	NV_DEBUG(drm, "or %d type %d crtc %d\n",
 		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
 
 	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -224,10 +232,10 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
 
 	/* Lacking working tv-out hardware, this is not 100% certain. */
-	if (nv_encoder->dcb->type == OUTPUT_ANALOG)
+	if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
 		mode_ctl |= 0x40;
 	else
-	if (nv_encoder->dcb->type == OUTPUT_TV)
+	if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
 		mode_ctl |= 0x100;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -238,7 +246,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 	ret = RING_SPACE(evo, 3);
 	if (ret) {
-		NV_ERROR(dev, "no space while connecting DAC\n");
+		NV_ERROR(drm, "no space while connecting DAC\n");
 		return;
 	}
 	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
@@ -271,11 +279,12 @@ static void
 nv50_dac_destroy(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 
 	if (!encoder)
 		return;
 
-	NV_DEBUG_KMS(encoder->dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	drm_encoder_cleanup(encoder);
 	kfree(nv_encoder);
@@ -286,7 +295,7 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
 };
 
 int
-nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry)
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
 {
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
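
The detect path above drives a load pattern onto the DAC, waits, and checks the sense bits. A minimal sketch of that sequence against a simulated register follows; the PRESENT mask and the instant acknowledgement are placeholders, not the driver's real values or timing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOAD_CTRL_ACTIVE  0x80000000u
#define LOAD_PRESENT_MASK 0x38000000u    /* placeholder for the PRESENT bits */

static uint32_t load_ctrl;               /* simulated DAC_LOAD_CTRL register */

static void
write_load_ctrl(uint32_t v)
{
	load_ctrl = v;
	if (v & LOAD_CTRL_ACTIVE)        /* pretend a monitor loads the DAC */
		load_ctrl |= LOAD_PRESENT_MASK;
}

static bool
dac_connected(uint32_t load_pattern)
{
	uint32_t state;

	write_load_ctrl(LOAD_CTRL_ACTIVE | load_pattern);
	/* the real code waits ~45ms here for the sense circuit to settle */
	state = load_ctrl;
	write_load_ctrl(0);

	return (state & LOAD_PRESENT_MASK) == LOAD_PRESENT_MASK;
}

int
main(void)
{
	printf("connected: %d\n", dac_connected(340));
	return 0;
}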
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f868a13e5c2d..f97b42cbb6bb 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -24,28 +24,30 @@
  *
  */
 
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
 #include "nv50_display.h"
 #include "nouveau_crtc.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
-#include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
 #include <drm/drm_crtc_helper.h>
+#include "nouveau_fence.h"
+
+#include <core/gpuobj.h>
+#include <subdev/timer.h>
 
-static void nv50_display_isr(struct drm_device *);
 static void nv50_display_bh(unsigned long);
 
 static inline int
 nv50_sor_nr(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 
-	if (dev_priv->chipset  < 0x90 ||
-	    dev_priv->chipset == 0x92 ||
-	    dev_priv->chipset == 0xa0)
+	if (device->chipset  < 0x90 ||
+	    device->chipset == 0x92 ||
+	    device->chipset == 0xa0)
 		return 2;
 
 	return 4;
@@ -54,73 +56,29 @@ nv50_sor_nr(struct drm_device *dev)
 u32
 nv50_display_active_crtcs(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 	u32 mask = 0;
 	int i;
 
-	if (dev_priv->chipset  < 0x90 ||
-	    dev_priv->chipset == 0x92 ||
-	    dev_priv->chipset == 0xa0) {
+	if (device->chipset  < 0x90 ||
+	    device->chipset == 0x92 ||
+	    device->chipset == 0xa0) {
 		for (i = 0; i < 2; i++)
-			mask |= nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
+			mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
 	} else {
 		for (i = 0; i < 4; i++)
-			mask |= nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+			mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
 	}
 
 	for (i = 0; i < 3; i++)
-		mask |= nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+		mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
 
 	return mask & 3;
 }
 
-static int
-evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
-{
-	int ret = 0;
-	nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
-	nv_wr32(dev, 0x610304 + (ch * 0x08), data);
-	nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
-	if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
-		ret = -EBUSY;
-	if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
-		NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
-	nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
-	return ret;
-}
-
 int
 nv50_display_early_init(struct drm_device *dev)
 {
-	u32 ctrl = nv_rd32(dev, 0x610200);
-	int i;
-
-	/* check if the master evo channel is already active, as good a sign as any
-	 * that the display engine is in a weird state (hibernate/kexec), if
-	 * it is, do our best to reset the display engine...
-	 */
-	if ((ctrl & 0x00000003) == 0x00000003) {
-		NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
-
-		/* deactivate both heads first, PDISP will disappear forever
-		 * (well, until you power cycle) on some boards as soon as
-		 * PMC_ENABLE is hit unless they are..
-		 */
-		for (i = 0; i < 2; i++) {
-			evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
-			evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
-			evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
-			evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
-			evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
-			evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
-		}
-		evo_icmd(dev, 0, 0x0080, 0);
-
-		/* reset PDISP */
-		nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
-		nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
-	}
-
 	return 0;
 }
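
The removed evo_icmd() helper above is a small write-then-poll handshake: write the data register, write 0x80000001 | method, spin until bit 31 clears. A minimal sketch of that handshake with a simulated register pair that acknowledges immediately:

#include <stdint.h>
#include <stdio.h>

static uint32_t pio_ctrl, pio_data;      /* simulated 0x610300/0x610304 pair */

static void
hw_write(uint32_t *reg, uint32_t val)
{
	*reg = val;
	if (reg == &pio_ctrl)
		*reg &= ~0x80000000u;    /* fake hardware: ack instantly */
}

static int
evo_pio(uint32_t mthd, uint32_t data)
{
	unsigned tries = 1000;

	hw_write(&pio_data, data);
	hw_write(&pio_ctrl, 0x80000001 | mthd);
	while ((pio_ctrl & 0x80000000) && --tries)
		;                        /* the real code bounds this with nv_wait() */

	return tries ? 0 : -16;          /* -EBUSY on timeout */
}

int
main(void)
{
	printf("evo_pio: %d\n", evo_pio(0x0880, 0x05000000));
	return 0;
}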
 
@@ -132,11 +90,8 @@ nv50_display_late_takedown(struct drm_device *dev)
 int
 nv50_display_sync(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
 	struct nv50_display *disp = nv50_display(dev);
 	struct nouveau_channel *evo = disp->master;
-	u64 start;
 	int ret;
 
 	ret = RING_SPACE(evo, 6);
@@ -148,29 +103,28 @@ nv50_display_sync(struct drm_device *dev)
 		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING  (evo, 0x00000000);
 
-		nv_wo32(disp->ntfy, 0x000, 0x00000000);
+		nv_wo32(disp->ramin, 0x2000, 0x00000000);
 		FIRE_RING (evo);
 
-		start = ptimer->read(dev);
-		do {
-			if (nv_ro32(disp->ntfy, 0x000))
-				return 0;
-		} while (ptimer->read(dev) - start < 2000000000ULL);
+		if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
+			return 0;
 	}
 
-	return -EBUSY;
+	return 0;
 }
 
 int
 nv50_display_init(struct drm_device *dev)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nouveau_channel *evo;
 	int ret, i;
 	u32 val;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
-	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+	nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
 
 	/*
 	 * I think the 0x006101XX range is some kind of main control area
@@ -178,82 +132,82 @@ nv50_display_init(struct drm_device *dev)
 	 */
 	/* CRTC? */
 	for (i = 0; i < 2; i++) {
-		val = nv_rd32(dev, 0x00616100 + (i * 0x800));
-		nv_wr32(dev, 0x00610190 + (i * 0x10), val);
-		val = nv_rd32(dev, 0x00616104 + (i * 0x800));
-		nv_wr32(dev, 0x00610194 + (i * 0x10), val);
-		val = nv_rd32(dev, 0x00616108 + (i * 0x800));
-		nv_wr32(dev, 0x00610198 + (i * 0x10), val);
-		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
-		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
+		val = nv_rd32(device, 0x00616100 + (i * 0x800));
+		nv_wr32(device, 0x00610190 + (i * 0x10), val);
+		val = nv_rd32(device, 0x00616104 + (i * 0x800));
+		nv_wr32(device, 0x00610194 + (i * 0x10), val);
+		val = nv_rd32(device, 0x00616108 + (i * 0x800));
+		nv_wr32(device, 0x00610198 + (i * 0x10), val);
+		val = nv_rd32(device, 0x0061610c + (i * 0x800));
+		nv_wr32(device, 0x0061019c + (i * 0x10), val);
 	}
 
 	/* DAC */
 	for (i = 0; i < 3; i++) {
-		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
-		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
+		val = nv_rd32(device, 0x0061a000 + (i * 0x800));
+		nv_wr32(device, 0x006101d0 + (i * 0x04), val);
 	}
 
 	/* SOR */
 	for (i = 0; i < nv50_sor_nr(dev); i++) {
-		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
-		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
+		val = nv_rd32(device, 0x0061c000 + (i * 0x800));
+		nv_wr32(device, 0x006101e0 + (i * 0x04), val);
 	}
 
 	/* EXT */
 	for (i = 0; i < 3; i++) {
-		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
-		nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
+		val = nv_rd32(device, 0x0061e000 + (i * 0x800));
+		nv_wr32(device, 0x006101f0 + (i * 0x04), val);
 	}
 
 	for (i = 0; i < 3; i++) {
-		nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
+		nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
 			NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
+		nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
 	}
 
 	/* The precise purpose is unknown; I suspect it has something to do
 	 * with text mode.
 	 */
-	if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
-		nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
-		nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
-		if (!nv_wait(dev, 0x006194e8, 2, 0)) {
-			NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
-			NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
-						nv_rd32(dev, 0x6194e8));
+	if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
+		nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
+		nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
+		if (!nv_wait(device, 0x006194e8, 2, 0)) {
+			NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
+			NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
+						nv_rd32(device, 0x6194e8));
 			return -EBUSY;
 		}
 	}
 
 	for (i = 0; i < 2; i++) {
-		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
-		if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+		nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
+		if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
 			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-			NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-			NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
-				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+			NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+			NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
+				 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
 			return -EBUSY;
 		}
 
-		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+		nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
 			NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
-		if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+		if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
 			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
 			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
-			NV_ERROR(dev, "timeout: "
+			NV_ERROR(drm, "timeout: "
 				      "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
-			NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
-				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+			NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
+				 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
 			return -EBUSY;
 		}
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
-	nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
-	nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
+	nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
+	nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
+	nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
+	nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
+	nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
 		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
 		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
 		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
@@ -263,7 +217,7 @@ nv50_display_init(struct drm_device *dev)
 		return ret;
 	evo = nv50_display(dev)->master;
 
-	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
+	nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
 
 	ret = RING_SPACE(evo, 3);
 	if (ret)
@@ -278,12 +232,14 @@ nv50_display_init(struct drm_device *dev)
 void
 nv50_display_fini(struct drm_device *dev)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nv50_display *disp = nv50_display(dev);
 	struct nouveau_channel *evo = disp->master;
 	struct drm_crtc *drm_crtc;
 	int ret, i;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
@@ -308,55 +264,59 @@ nv50_display_fini(struct drm_device *dev)
 		if (!crtc->base.enabled)
 			continue;
 
-		nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
-		if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) {
-			NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
+		nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
+		if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
+			NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
 				      "0x%08x\n", mask, mask);
-			NV_ERROR(dev, "0x610024 = 0x%08x\n",
-				 nv_rd32(dev, NV50_PDISPLAY_INTR_1));
+			NV_ERROR(drm, "0x610024 = 0x%08x\n",
+				 nv_rd32(device, NV50_PDISPLAY_INTR_1));
 		}
 	}
 
 	for (i = 0; i < 2; i++) {
-		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
-		if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+		nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
+		if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
 			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-			NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-			NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
-				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+			NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+			NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
+				 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
 		}
 	}
 
 	nv50_evo_fini(dev);
 
 	for (i = 0; i < 3; i++) {
-		if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
+		if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
 			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-			NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
-			NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
-				  nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
+			NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
+			NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
+				  nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
 		}
 	}
 
 	/* disable interrupts. */
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+	nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
 }
 
 int
 nv50_display_create(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
 	struct drm_connector *connector, *ct;
 	struct nv50_display *priv;
 	int ret, i;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
-	dev_priv->engine.display.priv = priv;
+
+	nouveau_display(dev)->priv = priv;
+	nouveau_display(dev)->dtor = nv50_display_destroy;
+	nouveau_display(dev)->init = nv50_display_init;
+	nouveau_display(dev)->fini = nv50_display_fini;
 
 	/* Create CRTC objects */
 	for (i = 0; i < 2; i++) {
@@ -367,10 +327,10 @@ nv50_display_create(struct drm_device *dev)
 
 	/* We setup the encoders from the BIOS table */
 	for (i = 0 ; i < dcb->entries; i++) {
-		struct dcb_entry *entry = &dcb->entry[i];
+		struct dcb_output *entry = &dcb->entry[i];
 
 		if (entry->location != DCB_LOC_ON_CHIP) {
-			NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
+			NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
 				entry->type, ffs(entry->or) - 1);
 			continue;
 		}
@@ -380,16 +340,16 @@ nv50_display_create(struct drm_device *dev)
 			continue;
 
 		switch (entry->type) {
-		case OUTPUT_TMDS:
-		case OUTPUT_LVDS:
-		case OUTPUT_DP:
+		case DCB_OUTPUT_TMDS:
+		case DCB_OUTPUT_LVDS:
+		case DCB_OUTPUT_DP:
 			nv50_sor_create(connector, entry);
 			break;
-		case OUTPUT_ANALOG:
+		case DCB_OUTPUT_ANALOG:
 			nv50_dac_create(connector, entry);
 			break;
 		default:
-			NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
+			NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
 			continue;
 		}
 	}
@@ -397,14 +357,13 @@ nv50_display_create(struct drm_device *dev)
 	list_for_each_entry_safe(connector, ct,
 				 &dev->mode_config.connector_list, head) {
 		if (!connector->encoder_ids[0]) {
-			NV_WARN(dev, "%s has no encoders, removing\n",
+			NV_WARN(drm, "%s has no encoders, removing\n",
 				drm_get_connector_name(connector));
 			connector->funcs->destroy(connector);
 		}
 	}
 
 	tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
-	nouveau_irq_register(dev, 26, nv50_display_isr);
 
 	ret = nv50_evo_create(dev);
 	if (ret) {
@@ -420,13 +379,16 @@ nv50_display_destroy(struct drm_device *dev)
 {
 	struct nv50_display *disp = nv50_display(dev);
 
-	NV_DEBUG_KMS(dev, "\n");
-
 	nv50_evo_destroy(dev);
-	nouveau_irq_unregister(dev, 26);
 	kfree(disp);
 }
 
+struct nouveau_bo *
+nv50_display_crtc_sema(struct drm_device *dev, int crtc)
+{
+	return nv50_display(dev)->crtc[crtc].sem.bo;
+}
+
 void
 nv50_display_flip_stop(struct drm_crtc *crtc)
 {
@@ -457,7 +419,7 @@ int
 nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		       struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
 	struct nv50_display *disp = nv50_display(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -477,7 +439,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			return ret;
 		}
 
-		if (dev_priv->chipset < 0xc0) {
+		if (nv_device(drm->device)->chipset < 0xc0) {
 			BEGIN_NV04(chan, 0, 0x0060, 2);
 			OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
 			OUT_RING  (chan, dispc->sem.offset);
@@ -487,12 +449,12 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			OUT_RING  (chan, dispc->sem.offset ^ 0x10);
 			OUT_RING  (chan, 0x74b1e000);
 			BEGIN_NV04(chan, 0, 0x0060, 1);
-			if (dev_priv->chipset < 0x84)
+			if (nv_device(drm->device)->chipset < 0x84)
 				OUT_RING  (chan, NvSema);
 			else
-				OUT_RING  (chan, chan->vram_handle);
+				OUT_RING  (chan, chan->vram);
 		} else {
-			u64 offset = nvc0_software_crtc(chan, nv_crtc->index);
+			u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
 			offset += dispc->sem.offset;
 			BEGIN_NVC0(chan, 0, 0x0010, 4);
 			OUT_RING  (chan, upper_32_bits(offset));
@@ -555,13 +517,13 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 }
 
 static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
+nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
 			   u32 mc, int pxclk)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_connector *nv_connector = NULL;
 	struct drm_encoder *encoder;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nvbios *bios = &drm->vbios;
 	u32 script = 0, or;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -576,7 +538,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
 
 	or = ffs(dcb->or) - 1;
 	switch (dcb->type) {
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		script = (mc >> 8) & 0xf;
 		if (bios->fp_no_ddc) {
 			if (bios->fp.dual_link)
@@ -609,34 +571,20 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
 			    (nv_connector->edid->input & 0x70) >= 0x20)
 				script |= 0x0200;
 		}
-
-		if (nouveau_uscript_lvds >= 0) {
-			NV_INFO(dev, "override script 0x%04x with 0x%04x "
-				     "for output LVDS-%d\n", script,
-				     nouveau_uscript_lvds, or);
-			script = nouveau_uscript_lvds;
-		}
 		break;
-	case OUTPUT_TMDS:
+	case DCB_OUTPUT_TMDS:
 		script = (mc >> 8) & 0xf;
 		if (pxclk >= 165000)
 			script |= 0x0100;
-
-		if (nouveau_uscript_tmds >= 0) {
-			NV_INFO(dev, "override script 0x%04x with 0x%04x "
-				     "for output TMDS-%d\n", script,
-				     nouveau_uscript_tmds, or);
-			script = nouveau_uscript_tmds;
-		}
 		break;
-	case OUTPUT_DP:
+	case DCB_OUTPUT_DP:
 		script = (mc >> 8) & 0xf;
 		break;
-	case OUTPUT_ANALOG:
+	case DCB_OUTPUT_ANALOG:
 		script = 0xff;
 		break;
 	default:
-		NV_ERROR(dev, "modeset on unsupported output type!\n");
+		NV_ERROR(drm, "modeset on unsupported output type!\n");
 		break;
 	}
 
@@ -644,59 +592,18 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
 }
 
 static void
-nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
-	struct nouveau_software_chan *pch, *tmp;
-
-	list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
-		if (pch->vblank.head != crtc)
-			continue;
-
-		spin_lock(&psw->peephole_lock);
-		nv_wr32(dev, 0x001704, pch->vblank.channel);
-		nv_wr32(dev, 0x001710, 0x80000000 | pch->vblank.ctxdma);
-		if (dev_priv->chipset == 0x50) {
-			nv_wr32(dev, 0x001570, pch->vblank.offset);
-			nv_wr32(dev, 0x001574, pch->vblank.value);
-		} else {
-			nv_wr32(dev, 0x060010, pch->vblank.offset);
-			nv_wr32(dev, 0x060014, pch->vblank.value);
-		}
-		spin_unlock(&psw->peephole_lock);
-
-		list_del(&pch->vblank.list);
-		drm_vblank_put(dev, crtc);
-	}
-
-	drm_handle_vblank(dev, crtc);
-}
-
-static void
-nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
-{
-	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
-		nv50_display_vblank_crtc_handler(dev, 0);
-
-	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
-		nv50_display_vblank_crtc_handler(dev, 1);
-
-	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
-}
-
-static void
 nv50_display_unk10_handler(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_display *disp = nv50_display(dev);
-	u32 unk30 = nv_rd32(dev, 0x610030), mc;
-	int i, crtc, or = 0, type = OUTPUT_ANY;
+	u32 unk30 = nv_rd32(device, 0x610030), mc;
+	int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
 
-	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
 	disp->irq.dcb = NULL;
 
-	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
+	nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
 
 	/* Determine which CRTC we're dealing with; only one will ever be
 	 * signalled at a time with the current nouveau code.
@@ -711,44 +618,44 @@ nv50_display_unk10_handler(struct drm_device *dev)
 		goto ack;
 
 	/* Find which encoder was connected to the CRTC */
-	for (i = 0; type == OUTPUT_ANY && i < 3; i++) {
-		mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
-		NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc);
+	for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
+		mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+		NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
 		if (!(mc & (1 << crtc)))
 			continue;
 
 		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = OUTPUT_ANALOG; break;
-		case 1: type = OUTPUT_TV; break;
+		case 0: type = DCB_OUTPUT_ANALOG; break;
+		case 1: type = DCB_OUTPUT_TV; break;
 		default:
-			NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
+			NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
 			goto ack;
 		}
 
 		or = i;
 	}
 
-	for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-		if (dev_priv->chipset  < 0x90 ||
-		    dev_priv->chipset == 0x92 ||
-		    dev_priv->chipset == 0xa0)
-			mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
+	for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
+		if (nv_device(drm->device)->chipset  < 0x90 ||
+		    nv_device(drm->device)->chipset == 0x92 ||
+		    nv_device(drm->device)->chipset == 0xa0)
+			mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
 		else
-			mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+			mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
 
-		NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc);
+		NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
 		if (!(mc & (1 << crtc)))
 			continue;
 
 		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = OUTPUT_LVDS; break;
-		case 1: type = OUTPUT_TMDS; break;
-		case 2: type = OUTPUT_TMDS; break;
-		case 5: type = OUTPUT_TMDS; break;
-		case 8: type = OUTPUT_DP; break;
-		case 9: type = OUTPUT_DP; break;
+		case 0: type = DCB_OUTPUT_LVDS; break;
+		case 1: type = DCB_OUTPUT_TMDS; break;
+		case 2: type = DCB_OUTPUT_TMDS; break;
+		case 5: type = DCB_OUTPUT_TMDS; break;
+		case 8: type = DCB_OUTPUT_DP; break;
+		case 9: type = DCB_OUTPUT_DP; break;
 		default:
-			NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
+			NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
 			goto ack;
 		}
 
@@ -756,12 +663,12 @@ nv50_display_unk10_handler(struct drm_device *dev)
 	}
 
 	/* There was no encoder to disable */
-	if (type == OUTPUT_ANY)
+	if (type == DCB_OUTPUT_ANY)
 		goto ack;
 
 	/* Disable the encoder */
-	for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
-		struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
+	for (i = 0; i < drm->vbios.dcb.entries; i++) {
+		struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
 
 		if (dcb->type == type && (dcb->or & (1 << or))) {
 			nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
@@ -770,22 +677,23 @@ nv50_display_unk10_handler(struct drm_device *dev)
 		}
 	}
 
-	NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc);
+	NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
 ack:
-	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
-	nv_wr32(dev, 0x610030, 0x80000000);
+	nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
+	nv_wr32(device, 0x610030, 0x80000000);
 }
 
 static void
 nv50_display_unk20_handler(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_display *disp = nv50_display(dev);
-	u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
-	struct dcb_entry *dcb;
-	int i, crtc, or = 0, type = OUTPUT_ANY;
+	u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
+	struct dcb_output *dcb;
+	int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
 
-	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
 	dcb = disp->irq.dcb;
 	if (dcb) {
 		nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
@@ -795,86 +703,86 @@ nv50_display_unk20_handler(struct drm_device *dev)
 	/* CRTC clock change requested? */
 	crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
 	if (crtc >= 0) {
-		pclk  = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
+		pclk  = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
 		pclk &= 0x003fffff;
 		if (pclk)
 			nv50_crtc_set_clock(dev, crtc, pclk);
 
-		tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
+		tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
 		tmp &= ~0x000000f;
-		nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
+		nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
 	}
 
 	/* Nothing needs to be done for the encoder */
 	crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
 	if (crtc < 0)
 		goto ack;
-	pclk  = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
+	pclk  = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
 
 	/* Find which encoder is connected to the CRTC */
-	for (i = 0; type == OUTPUT_ANY && i < 3; i++) {
-		mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
-		NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc);
+	for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
+		mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
+		NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
 		if (!(mc & (1 << crtc)))
 			continue;
 
 		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = OUTPUT_ANALOG; break;
-		case 1: type = OUTPUT_TV; break;
+		case 0: type = DCB_OUTPUT_ANALOG; break;
+		case 1: type = DCB_OUTPUT_TV; break;
 		default:
-			NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
+			NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
 			goto ack;
 		}
 
 		or = i;
 	}
 
-	for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-		if (dev_priv->chipset  < 0x90 ||
-		    dev_priv->chipset == 0x92 ||
-		    dev_priv->chipset == 0xa0)
-			mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
+	for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
+		if (nv_device(drm->device)->chipset  < 0x90 ||
+		    nv_device(drm->device)->chipset == 0x92 ||
+		    nv_device(drm->device)->chipset == 0xa0)
+			mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
 		else
-			mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
+			mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
 
-		NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc);
+		NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
 		if (!(mc & (1 << crtc)))
 			continue;
 
 		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = OUTPUT_LVDS; break;
-		case 1: type = OUTPUT_TMDS; break;
-		case 2: type = OUTPUT_TMDS; break;
-		case 5: type = OUTPUT_TMDS; break;
-		case 8: type = OUTPUT_DP; break;
-		case 9: type = OUTPUT_DP; break;
+		case 0: type = DCB_OUTPUT_LVDS; break;
+		case 1: type = DCB_OUTPUT_TMDS; break;
+		case 2: type = DCB_OUTPUT_TMDS; break;
+		case 5: type = DCB_OUTPUT_TMDS; break;
+		case 8: type = DCB_OUTPUT_DP; break;
+		case 9: type = DCB_OUTPUT_DP; break;
 		default:
-			NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
+			NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
 			goto ack;
 		}
 
 		or = i;
 	}
 
-	if (type == OUTPUT_ANY)
+	if (type == DCB_OUTPUT_ANY)
 		goto ack;
 
 	/* Enable the encoder */
-	for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
-		dcb = &dev_priv->vbios.dcb.entry[i];
+	for (i = 0; i < drm->vbios.dcb.entries; i++) {
+		dcb = &drm->vbios.dcb.entry[i];
 		if (dcb->type == type && (dcb->or & (1 << or)))
 			break;
 	}
 
-	if (i == dev_priv->vbios.dcb.entries) {
-		NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc);
+	if (i == drm->vbios.dcb.entries) {
+		NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
 		goto ack;
 	}
 
 	script = nv50_display_script_select(dev, dcb, mc, pclk);
 	nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
 
-	if (type == OUTPUT_DP) {
+	if (type == DCB_OUTPUT_DP) {
 		int link = !(dcb->dpconf.sor.link & 1);
 		if ((mc & 0x000f0000) == 0x00020000)
 			nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
@@ -882,14 +790,14 @@ nv50_display_unk20_handler(struct drm_device *dev)
 			nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
 	}
 
-	if (dcb->type != OUTPUT_ANALOG) {
-		tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
+	if (dcb->type != DCB_OUTPUT_ANALOG) {
+		tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
 		tmp &= ~0x00000f0f;
 		if (script & 0x0100)
 			tmp |= 0x00000101;
-		nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
+		nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
 	} else {
-		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+		nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
 	}
 
 	disp->irq.dcb = dcb;
@@ -897,8 +805,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
 	disp->irq.script = script;
 
 ack:
-	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
-	nv_wr32(dev, 0x610030, 0x80000000);
+	nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
+	nv_wr32(device, 0x610030, 0x80000000);
 }
 
 /* If programming a TMDS output on a SOR that can also be configured for
@@ -910,23 +818,24 @@ ack:
  * programmed for DisplayPort.
  */
 static void
-nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
+nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
 	struct drm_encoder *encoder;
 	u32 tmp;
 
-	if (dcb->type != OUTPUT_TMDS)
+	if (dcb->type != DCB_OUTPUT_TMDS)
 		return;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 
-		if (nv_encoder->dcb->type == OUTPUT_DP &&
+		if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
 		    nv_encoder->dcb->or & (1 << or)) {
-			tmp  = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+			tmp  = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
 			tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
-			nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+			nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
 			break;
 		}
 	}
@@ -935,12 +844,14 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
 static void
 nv50_display_unk40_handler(struct drm_device *dev)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_display *disp = nv50_display(dev);
-	struct dcb_entry *dcb = disp->irq.dcb;
+	struct dcb_output *dcb = disp->irq.dcb;
 	u16 script = disp->irq.script;
-	u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk;
+	u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
 
-	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
 	disp->irq.dcb = NULL;
 	if (!dcb)
 		goto ack;
@@ -949,21 +860,23 @@ nv50_display_unk40_handler(struct drm_device *dev)
 	nv50_display_unk40_dp_set_tmds(dev, dcb);
 
 ack:
-	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
-	nv_wr32(dev, 0x610030, 0x80000000);
-	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
+	nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
+	nv_wr32(device, 0x610030, 0x80000000);
+	nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
 }
 
 static void
 nv50_display_bh(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *)data;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	for (;;) {
-		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
-		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
+		uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
+		uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
 
-		NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+		NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
 
 		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
 			nv50_display_unk10_handler(dev);
@@ -977,13 +890,15 @@ nv50_display_bh(unsigned long data)
 			break;
 	}
 
-	nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
+	nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
 }
 
 static void
 nv50_display_error_handler(struct drm_device *dev)
 {
-	u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
 	u32 addr, data;
 	int chid;
 
@@ -991,29 +906,31 @@ nv50_display_error_handler(struct drm_device *dev)
 		if (!(channels & (1 << chid)))
 			continue;
 
-		nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
-		addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
-		data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
-		NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
+		nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
+		addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
+		data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
+		NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
 			      "(0x%04x 0x%02x)\n", chid,
 			 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
 
-		nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
+		nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
 	}
 }
 
-static void
-nv50_display_isr(struct drm_device *dev)
+void
+nv50_display_intr(struct drm_device *dev)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_display *disp = nv50_display(dev);
 	uint32_t delayed = 0;
 
-	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
-		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
-		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
+	while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
+		uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
+		uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
 		uint32_t clock;
 
-		NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+		NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
 
 		if (!intr0 && !(intr1 & ~delayed))
 			break;
@@ -1024,29 +941,29 @@ nv50_display_isr(struct drm_device *dev)
 		}
 
 		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
-			nv50_display_vblank_handler(dev, intr1);
 			intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
+			delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
 		}
 
 		clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
 				  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
 				  NV50_PDISPLAY_INTR_1_CLK_UNK40));
 		if (clock) {
-			nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+			nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
 			tasklet_schedule(&disp->tasklet);
 			delayed |= clock;
 			intr1 &= ~clock;
 		}
 
 		if (intr0) {
-			NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
-			nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
+			NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
+			nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
 		}
 
 		if (intr1) {
-			NV_ERROR(dev,
+			NV_ERROR(drm,
 				 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
-			nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
+			nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 009ec2a811c4..973554d8a7a6 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -27,12 +27,9 @@
 #ifndef __NV50_DISPLAY_H__
 #define __NV50_DISPLAY_H__
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
-#include "nouveau_reg.h"
+#include "nouveau_display.h"
 #include "nouveau_crtc.h"
-#include "nouveau_software.h"
+#include "nouveau_reg.h"
 #include "nv50_evo.h"
 
 struct nv50_display_crtc {
@@ -46,13 +43,16 @@ struct nv50_display_crtc {
 
 struct nv50_display {
 	struct nouveau_channel *master;
-	struct nouveau_gpuobj *ntfy;
+
+	struct nouveau_gpuobj *ramin;
+	u32 dmao;
+	u32 hash;
 
 	struct nv50_display_crtc crtc[2];
 
 	struct tasklet_struct tasklet;
 	struct {
-		struct dcb_entry *dcb;
+		struct dcb_output *dcb;
 		u16 script;
 		u32 pclk;
 	} irq;
@@ -61,8 +61,7 @@ struct nv50_display {
 static inline struct nv50_display *
 nv50_display(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	return dev_priv->engine.display.priv;
+	return nouveau_display(dev)->priv;
 }
 
 int nv50_display_early_init(struct drm_device *dev);
@@ -71,6 +70,7 @@ int nv50_display_create(struct drm_device *dev);
 int nv50_display_init(struct drm_device *dev);
 void nv50_display_fini(struct drm_device *dev);
 void nv50_display_destroy(struct drm_device *dev);
+void nv50_display_intr(struct drm_device *);
 int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
 int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
 
@@ -90,4 +90,17 @@ void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
 int  nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
 			 u64 base, u64 size, struct nouveau_gpuobj **);
 
+int  nvd0_display_create(struct drm_device *);
+void nvd0_display_destroy(struct drm_device *);
+int  nvd0_display_init(struct drm_device *);
+void nvd0_display_fini(struct drm_device *);
+void nvd0_display_intr(struct drm_device *);
+
+void nvd0_display_flip_stop(struct drm_crtc *);
+int  nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+			    struct nouveau_channel *, u32 swap_interval);
+
+struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
+struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
+
 #endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index dabcd8787176..9f6f55cdfa77 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -24,11 +24,29 @@
 
 #include <drm/drmP.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nv50_display.h"
 
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+static u32
+nv50_evo_rd32(struct nouveau_object *object, u32 addr)
+{
+	void __iomem *iomem = object->oclass->ofuncs->rd08;
+	return ioread32_native(iomem + addr);
+}
+
+static void
+nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+	void __iomem *iomem = object->oclass->ofuncs->rd08;
+	iowrite32_native(data, iomem + addr);
+}
+
 static void
 nv50_evo_channel_del(struct nouveau_channel **pevo)
 {
@@ -38,26 +56,29 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
 		return;
 	*pevo = NULL;
 
-	nouveau_ramht_ref(NULL, &evo->ramht, evo);
-	nouveau_gpuobj_channel_takedown(evo);
-	nouveau_bo_unmap(evo->pushbuf_bo);
-	nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+	nouveau_bo_unmap(evo->push.buffer);
+	nouveau_bo_ref(NULL, &evo->push.buffer);
 
-	if (evo->user)
-		iounmap(evo->user);
+	if (evo->object)
+		iounmap(evo->object->oclass->ofuncs);
 
 	kfree(evo);
 }
 
-void
-nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
+		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
 {
-	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+	struct drm_device *dev = evo->fence;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv50_display *disp = nv50_display(dev);
+	u32 dmao = disp->dmao;
+	u32 hash = disp->hash;
 	u32 flags5;
 
-	if (dev_priv->chipset < 0xc0) {
+	if (nv_device(drm->device)->chipset < 0xc0) {
 		/* not supported on 0x50, specified in format mthd */
-		if (dev_priv->chipset == 0x50)
+		if (nv_device(drm->device)->chipset == 0x50)
 			memtype = 0;
 		flags5 = 0x00010000;
 	} else {
@@ -67,42 +88,28 @@ nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size
 			flags5 = 0x00020000;
 	}
 
-	nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
-			     NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
-	nv_wo32(obj, 0x14, flags5);
-	dev_priv->engine.instmem.flush(obj->dev);
-}
+	nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
+	nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
+	nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
+	nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
+					  upper_32_bits(base));
+	nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
+	nv_wo32(disp->ramin, dmao + 0x14, flags5);
 
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
-		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
-	struct nv50_display *disp = nv50_display(evo->dev);
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
-	if (ret)
-		return ret;
-	obj->engine = NVOBJ_ENGINE_DISPLAY;
-
-	nv50_evo_dmaobj_init(obj, memtype, base, size);
-
-	ret = nouveau_ramht_insert(evo, handle, obj);
-	if (ret)
-		goto out;
+	nv_wo32(disp->ramin, hash + 0x00, handle);
+	nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
+					   evo->handle);
 
-	if (pobj)
-		nouveau_gpuobj_ref(obj, pobj);
-out:
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
+	disp->dmao += 0x20;
+	disp->hash += 0x08;
+	return 0;
 }
 
 static int
 nv50_evo_channel_new(struct drm_device *dev, int chid,
 		     struct nouveau_channel **pevo)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_display *disp = nv50_display(dev);
 	struct nouveau_channel *evo;
 	int ret;
@@ -112,79 +119,84 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
 		return -ENOMEM;
 	*pevo = evo;
 
-	evo->id = chid;
-	evo->dev = dev;
+	evo->drm = drm;
+	evo->handle = chid;
+	evo->fence = dev;
 	evo->user_get = 4;
 	evo->user_put = 0;
 
 	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
-			     &evo->pushbuf_bo);
+			     &evo->push.buffer);
 	if (ret == 0)
-		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+		ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
 	if (ret) {
-		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+		NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
 		nv50_evo_channel_del(pevo);
 		return ret;
 	}
 
-	ret = nouveau_bo_map(evo->pushbuf_bo);
+	ret = nouveau_bo_map(evo->push.buffer);
 	if (ret) {
-		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+		NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
 		nv50_evo_channel_del(pevo);
 		return ret;
 	}
 
-	evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			    NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
-	if (!evo->user) {
-		NV_ERROR(dev, "Error mapping EVO control regs.\n");
-		nv50_evo_channel_del(pevo);
-		return -ENOMEM;
-	}
-
-	/* bind primary evo channel's ramht to the channel */
-	if (disp->master && evo != disp->master)
-		nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
-
+	evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
+#ifdef NOUVEAU_OBJECT_MAGIC
+	evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
+#endif
+	evo->object->parent = nv_object(disp->ramin)->parent;
+	evo->object->engine = nv_object(disp->ramin)->engine;
+	evo->object->oclass =
+		kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
+	evo->object->oclass->ofuncs =
+		kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
+	evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
+	evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
+	evo->object->oclass->ofuncs->rd08 =
+		ioremap(pci_resource_start(dev->pdev, 0) +
+			NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
 	return 0;
 }
 
 static int
 nv50_evo_channel_init(struct nouveau_channel *evo)
 {
-	struct drm_device *dev = evo->dev;
-	int id = evo->id, ret, i;
-	u64 pushbuf = evo->pushbuf_bo->bo.offset;
+	struct nouveau_drm *drm = evo->drm;
+	struct nouveau_device *device = nv_device(drm->device);
+	int id = evo->handle, ret, i;
+	u64 pushbuf = evo->push.buffer->bo.offset;
 	u32 tmp;
 
-	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+	tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
 	if ((tmp & 0x009f0000) == 0x00020000)
-		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
+		nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
 
-	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+	tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
 	if ((tmp & 0x003f0000) == 0x00030000)
-		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
+		nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
 
 	/* initialise fifo */
-	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
+	nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
 		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
 		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
-	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
-	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
-	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
+	nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
+	nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
+	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
 		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
 
-	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
-	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
+	nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
+	nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
 		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
-		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
-			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+	if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
+		NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
+			 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
 		return -EBUSY;
 	}
 
 	/* enable error reporting on the channel */
-	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
+	nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
 
 	evo->dma.max = (4096/4) - 2;
 	evo->dma.max &= ~7;
@@ -205,16 +217,17 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
 static void
 nv50_evo_channel_fini(struct nouveau_channel *evo)
 {
-	struct drm_device *dev = evo->dev;
-	int id = evo->id;
-
-	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
-	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
-	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
-	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
-		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
-			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+	struct nouveau_drm *drm = evo->drm;
+	struct nouveau_device *device = nv_device(drm->device);
+	int id = evo->handle;
+
+	nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
+	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
+	nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
+	nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
+	if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
+		NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
+			 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
 	}
 }
 
@@ -231,93 +244,66 @@ nv50_evo_destroy(struct drm_device *dev)
 		}
 		nv50_evo_channel_del(&disp->crtc[i].sync);
 	}
-	nouveau_gpuobj_ref(NULL, &disp->ntfy);
 	nv50_evo_channel_del(&disp->master);
+	nouveau_gpuobj_ref(NULL, &disp->ramin);
 }
 
 int
 nv50_evo_create(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	struct nv50_display *disp = nv50_display(dev);
-	struct nouveau_gpuobj *ramht = NULL;
 	struct nouveau_channel *evo;
 	int ret, i, j;
 
-	/* create primary evo channel, the one we use for modesetting
-	 * purporses
-	 */
-	ret = nv50_evo_channel_new(dev, 0, &disp->master);
-	if (ret)
-		return ret;
-	evo = disp->master;
-
 	/* setup object management on it, any other evo channel will
 	 * use this also as there's no per-channel support on the
 	 * hardware
 	 */
-	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
-				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
-	if (ret) {
-		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
-		goto err;
-	}
-
-	ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
+	ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
+				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
 	if (ret) {
-		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+		NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
 		goto err;
 	}
 
-	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
-	if (ret) {
-		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
-		goto err;
-	}
-
-	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
-	nouveau_gpuobj_ref(NULL, &ramht);
-	if (ret)
-		goto err;
+	disp->hash = 0x0000;
+	disp->dmao = 0x1000;
 
-	/* not sure exactly what this is..
-	 *
-	 * the first dword of the structure is used by nvidia to wait on
-	 * full completion of an EVO "update" command.
-	 *
-	 * method 0x8c on the master evo channel will fill a lot more of
-	 * this structure with some undefined info
+	/* create primary evo channel, the one we use for modesetting
+	 * purposes
 	 */
-	ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
-				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
+	ret = nv50_evo_channel_new(dev, 0, &disp->master);
 	if (ret)
-		goto err;
+		return ret;
+	evo = disp->master;
 
 	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
-				  disp->ntfy->vinst, disp->ntfy->size, NULL);
+				  disp->ramin->addr + 0x2000, 0x1000, NULL);
 	if (ret)
 		goto err;
 
 	/* create some default objects for the scanout memtypes we support */
 	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
-				  0, dev_priv->vram_size, NULL);
+				  0, pfb->ram.size, NULL);
 	if (ret)
 		goto err;
 
 	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
-				  0, dev_priv->vram_size, NULL);
+				  0, pfb->ram.size, NULL);
 	if (ret)
 		goto err;
 
 	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
-				  (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
-				  0, dev_priv->vram_size, NULL);
+				  (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
+				  0, pfb->ram.size, NULL);
 	if (ret)
 		goto err;
 
 	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
-				  (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
-				  0, dev_priv->vram_size, NULL);
+				  (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
+				  0, pfb->ram.size, NULL);
 	if (ret)
 		goto err;
 
@@ -352,21 +338,21 @@ nv50_evo_create(struct drm_device *dev)
 			goto err;
 
 		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
-					  0, dev_priv->vram_size, NULL);
+					  0, pfb->ram.size, NULL);
 		if (ret)
 			goto err;
 
 		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
-					  (dev_priv->chipset < 0xc0 ?
-					  0x7a00 : 0xfe00),
-					  0, dev_priv->vram_size, NULL);
+					  (nv_device(drm->device)->chipset < 0xc0 ?
+					  0x7a : 0xfe),
+					  0, pfb->ram.size, NULL);
 		if (ret)
 			goto err;
 
 		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
-					  (dev_priv->chipset < 0xc0 ?
-					  0x7000 : 0xfe00),
-					  0, dev_priv->vram_size, NULL);
+					  (nv_device(drm->device)->chipset < 0xc0 ?
+					  0x70 : 0xfe),
+					  0, pfb->ram.size, NULL);
 		if (ret)
 			goto err;
 
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
deleted file mode 100644
index befd5fb7155f..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ /dev/null
@@ -1,295 +0,0 @@
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nouveau_fifo.h"
-
-struct nv50_fb_priv {
-	struct page *r100c08_page;
-	dma_addr_t r100c08;
-};
-
-static void
-nv50_fb_destroy(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nv50_fb_priv *priv = pfb->priv;
-
-	if (drm_mm_initialized(&pfb->tag_heap))
-		drm_mm_takedown(&pfb->tag_heap);
-
-	if (priv->r100c08_page) {
-		pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c08_page);
-	}
-
-	kfree(priv);
-	pfb->priv = NULL;
-}
-
-static int
-nv50_fb_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nv50_fb_priv *priv;
-	u32 tagmem;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	pfb->priv = priv;
-
-	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!priv->r100c08_page) {
-		nv50_fb_destroy(dev);
-		return -ENOMEM;
-	}
-
-	priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
-				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
-		nv50_fb_destroy(dev);
-		return -EFAULT;
-	}
-
-	tagmem = nv_rd32(dev, 0x100320);
-	NV_DEBUG(dev, "%d tags available\n", tagmem);
-	ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
-	if (ret) {
-		nv50_fb_destroy(dev);
-		return ret;
-	}
-
-	return 0;
-}
-
-int
-nv50_fb_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_fb_priv *priv;
-	int ret;
-
-	if (!dev_priv->engine.fb.priv) {
-		ret = nv50_fb_create(dev);
-		if (ret)
-			return ret;
-	}
-	priv = dev_priv->engine.fb.priv;
-
-	/* Not a clue what this is exactly.  Without pointing it at a
-	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
-	 * cause IOMMU "read from address 0" errors (rh#561267)
-	 */
-	nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
-
-	/* This is needed to get meaningful information from 100c90
-	 * on traps. No idea what these values mean exactly. */
-	switch (dev_priv->chipset) {
-	case 0x50:
-		nv_wr32(dev, 0x100c90, 0x000707ff);
-		break;
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-		nv_wr32(dev, 0x100c90, 0x000d0fff);
-		break;
-	case 0xaf:
-		nv_wr32(dev, 0x100c90, 0x089d1fff);
-		break;
-	default:
-		nv_wr32(dev, 0x100c90, 0x001d07ff);
-		break;
-	}
-
-	return 0;
-}
-
-void
-nv50_fb_takedown(struct drm_device *dev)
-{
-	nv50_fb_destroy(dev);
-}
-
-static struct nouveau_enum vm_dispatch_subclients[] = {
-	{ 0x00000000, "GRCTX", NULL },
-	{ 0x00000001, "NOTIFY", NULL },
-	{ 0x00000002, "QUERY", NULL },
-	{ 0x00000003, "COND", NULL },
-	{ 0x00000004, "M2M_IN", NULL },
-	{ 0x00000005, "M2M_OUT", NULL },
-	{ 0x00000006, "M2M_NOTIFY", NULL },
-	{}
-};
-
-static struct nouveau_enum vm_ccache_subclients[] = {
-	{ 0x00000000, "CB", NULL },
-	{ 0x00000001, "TIC", NULL },
-	{ 0x00000002, "TSC", NULL },
-	{}
-};
-
-static struct nouveau_enum vm_prop_subclients[] = {
-	{ 0x00000000, "RT0", NULL },
-	{ 0x00000001, "RT1", NULL },
-	{ 0x00000002, "RT2", NULL },
-	{ 0x00000003, "RT3", NULL },
-	{ 0x00000004, "RT4", NULL },
-	{ 0x00000005, "RT5", NULL },
-	{ 0x00000006, "RT6", NULL },
-	{ 0x00000007, "RT7", NULL },
-	{ 0x00000008, "ZETA", NULL },
-	{ 0x00000009, "LOCAL", NULL },
-	{ 0x0000000a, "GLOBAL", NULL },
-	{ 0x0000000b, "STACK", NULL },
-	{ 0x0000000c, "DST2D", NULL },
-	{}
-};
-
-static struct nouveau_enum vm_pfifo_subclients[] = {
-	{ 0x00000000, "PUSHBUF", NULL },
-	{ 0x00000001, "SEMAPHORE", NULL },
-	{}
-};
-
-static struct nouveau_enum vm_bar_subclients[] = {
-	{ 0x00000000, "FB", NULL },
-	{ 0x00000001, "IN", NULL },
-	{}
-};
-
-static struct nouveau_enum vm_client[] = {
-	{ 0x00000000, "STRMOUT", NULL },
-	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
-	{ 0x00000004, "PFIFO_WRITE", NULL },
-	{ 0x00000005, "CCACHE", vm_ccache_subclients },
-	{ 0x00000006, "PPPP", NULL },
-	{ 0x00000007, "CLIPID", NULL },
-	{ 0x00000008, "PFIFO_READ", NULL },
-	{ 0x00000009, "VFETCH", NULL },
-	{ 0x0000000a, "TEXTURE", NULL },
-	{ 0x0000000b, "PROP", vm_prop_subclients },
-	{ 0x0000000c, "PVP", NULL },
-	{ 0x0000000d, "PBSP", NULL },
-	{ 0x0000000e, "PCRYPT", NULL },
-	{ 0x0000000f, "PCOUNTER", NULL },
-	{ 0x00000011, "PDAEMON", NULL },
-	{}
-};
-
-static struct nouveau_enum vm_engine[] = {
-	{ 0x00000000, "PGRAPH", NULL },
-	{ 0x00000001, "PVP", NULL },
-	{ 0x00000004, "PEEPHOLE", NULL },
-	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
-	{ 0x00000006, "BAR", vm_bar_subclients },
-	{ 0x00000008, "PPPP", NULL },
-	{ 0x00000009, "PBSP", NULL },
-	{ 0x0000000a, "PCRYPT", NULL },
-	{ 0x0000000b, "PCOUNTER", NULL },
-	{ 0x0000000c, "SEMAPHORE_BG", NULL },
-	{ 0x0000000d, "PCOPY", NULL },
-	{ 0x0000000e, "PDAEMON", NULL },
-	{}
-};
-
-static struct nouveau_enum vm_fault[] = {
-	{ 0x00000000, "PT_NOT_PRESENT", NULL },
-	{ 0x00000001, "PT_TOO_SHORT", NULL },
-	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
-	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
-	{ 0x00000004, "PAGE_READ_ONLY", NULL },
-	{ 0x00000006, "NULL_DMAOBJ", NULL },
-	{ 0x00000007, "WRONG_MEMTYPE", NULL },
-	{ 0x0000000b, "VRAM_LIMIT", NULL },
-	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
-	{}
-};
-
-void
-nv50_fb_vm_trap(struct drm_device *dev, int display)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	const struct nouveau_enum *en, *cl;
-	unsigned long flags;
-	u32 trap[6], idx, chinst;
-	u8 st0, st1, st2, st3;
-	int i, ch;
-
-	idx = nv_rd32(dev, 0x100c90);
-	if (!(idx & 0x80000000))
-		return;
-	idx &= 0x00ffffff;
-
-	for (i = 0; i < 6; i++) {
-		nv_wr32(dev, 0x100c90, idx | i << 24);
-		trap[i] = nv_rd32(dev, 0x100c94);
-	}
-	nv_wr32(dev, 0x100c90, idx | 0x80000000);
-
-	if (!display)
-		return;
-
-	/* lookup channel id */
-	chinst = (trap[2] << 16) | trap[1];
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (ch = 0; ch < pfifo->channels; ch++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
-
-		if (!chan || !chan->ramin)
-			continue;
-
-		if (chinst == chan->ramin->vinst >> 12)
-			break;
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
-	/* decode status bits into something more useful */
-	if (dev_priv->chipset  < 0xa3 ||
-	    dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
-		st0 = (trap[0] & 0x0000000f) >> 0;
-		st1 = (trap[0] & 0x000000f0) >> 4;
-		st2 = (trap[0] & 0x00000f00) >> 8;
-		st3 = (trap[0] & 0x0000f000) >> 12;
-	} else {
-		st0 = (trap[0] & 0x000000ff) >> 0;
-		st1 = (trap[0] & 0x0000ff00) >> 8;
-		st2 = (trap[0] & 0x00ff0000) >> 16;
-		st3 = (trap[0] & 0xff000000) >> 24;
-	}
-
-	NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
-		(trap[5] & 0x00000100) ? "read" : "write",
-		trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
-
-	en = nouveau_enum_find(vm_engine, st0);
-	if (en)
-		printk("%s/", en->name);
-	else
-		printk("%02x/", st0);
-
-	cl = nouveau_enum_find(vm_client, st2);
-	if (cl)
-		printk("%s/", cl->name);
-	else
-		printk("%02x/", st2);
-
-	if      (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
-	else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
-	else                     cl = NULL;
-	if (cl)
-		printk("%s", cl->name);
-	else
-		printk("%02x", st3);
-
-	printk(" reason: ");
-	en = nouveau_enum_find(vm_fault, st1);
-	if (en)
-		printk("%s\n", en->name);
-	else
-		printk("0x%08x\n", st1);
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index ec24959e67a2..52068a0910dc 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -22,20 +22,16 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
-#include "nouveau_mm.h"
 
 int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
@@ -156,10 +150,11 @@ int
 nv50_fbcon_accel_init(struct fb_info *info)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
 	struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+	struct drm_device *dev = nfbdev->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_object *object;
 	int ret, format;
 
 	switch (info->var.bits_per_pixel) {
@@ -189,7 +184,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+				 0x502d, NULL, 0, &object);
 	if (ret)
 		return ret;
 
@@ -202,9 +198,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
 	OUT_RING(chan, Nv2D);
 	BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
-	OUT_RING(chan, chan->vram_handle);
-	OUT_RING(chan, chan->vram_handle);
-	OUT_RING(chan, chan->vram_handle);
+	OUT_RING(chan, NvDmaFB);
+	OUT_RING(chan, NvDmaFB);
+	OUT_RING(chan, NvDmaFB);
 	BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
 	OUT_RING(chan, 0);
 	BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
new file mode 100644
index 000000000000..e0763ea88ee2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include "nv50_display.h"
+
+struct nv50_fence_chan {
+	struct nouveau_fence_chan base;
+};
+
+struct nv50_fence_priv {
+	struct nouveau_fence_priv base;
+	struct nouveau_bo *bo;
+	spinlock_t lock;
+	u32 sequence;
+};
+
+static int
+nv50_fence_context_new(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->drm->dev;
+	struct nv50_fence_priv *priv = chan->drm->fence;
+	struct nv50_fence_chan *fctx;
+	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+	struct nouveau_object *object;
+	int ret, i;
+
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+
+	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+				 NvSema, 0x0002,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = mem->start * PAGE_SIZE,
+					.limit = mem->size - 1,
+				 }, sizeof(struct nv_dma_class),
+				 &object);
+
+	/* dma objects for display sync channel semaphore blocks */
+	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+
+		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+					 NvEvoSema0 + i, 0x003d,
+					 &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = bo->bo.offset,
+						.limit = bo->bo.offset + 0xfff,
+					 }, sizeof(struct nv_dma_class),
+					 &object);
+	}
+
+	if (ret)
+		nv10_fence_context_del(chan);
+	return ret;
+}
+
+int
+nv50_fence_create(struct nouveau_drm *drm)
+{
+	struct nv50_fence_priv *priv;
+	int ret = 0;
+
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.dtor = nv10_fence_destroy;
+	priv->base.context_new = nv50_fence_context_new;
+	priv->base.context_del = nv10_fence_context_del;
+	priv->base.emit = nv10_fence_emit;
+	priv->base.read = nv10_fence_read;
+	priv->base.sync = nv17_fence_sync;
+	spin_lock_init(&priv->lock);
+
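+	/* Small VRAM buffer for the semaphore state: allocate, pin and
+	 * map it so the CPU can seed the initial value. */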
+	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &priv->bo);
+	if (!ret) {
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		if (!ret)
+			ret = nouveau_bo_map(priv->bo);
+		if (ret)
+			nouveau_bo_ref(NULL, &priv->bo);
+	}
+
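+	/* Seed the initial semaphore value before any channel uses it. */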
+	if (ret == 0) {
+		nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+		priv->base.sync = nv17_fence_sync;
+	}
+
+	if (ret)
+		nv10_fence_destroy(drm);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
deleted file mode 100644
index 5a440e89e918..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-#include "nouveau_vm.h"
-
-struct nv50_fifo_priv {
-	struct nouveau_fifo_priv base;
-	struct nouveau_gpuobj *playlist[2];
-	int cur_playlist;
-};
-
-struct nv50_fifo_chan {
-	struct nouveau_fifo_chan base;
-};
-
-void
-nv50_fifo_playlist_update(struct drm_device *dev)
-{
-	struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *cur;
-	int i, p;
-
-	cur = priv->playlist[priv->cur_playlist];
-	priv->cur_playlist = !priv->cur_playlist;
-
-	for (i = 0, p = 0; i < priv->base.channels; i++) {
-		if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
-			nv_wo32(cur, p++ * 4, i);
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-
-	nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
-	nv_wr32(dev, 0x0032ec, p);
-	nv_wr32(dev, 0x002500, 0x00000101);
-}
-
-static int
-nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
-	struct nv50_fifo_chan *fctx;
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
-	u64 instance = chan->ramin->vinst >> 12;
-	unsigned long flags;
-	int ret = 0, i;
-
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-	atomic_inc(&chan->vm->engref[engine]);
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			     NV50_USER(chan->id), PAGE_SIZE);
-	if (!chan->user) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	for (i = 0; i < 0x100; i += 4)
-		nv_wo32(chan->ramin, i, 0x00000000);
-	nv_wo32(chan->ramin, 0x3c, 0x403f6078);
-	nv_wo32(chan->ramin, 0x40, 0x00000000);
-	nv_wo32(chan->ramin, 0x44, 0x01003fff);
-	nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
-	nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
-	nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
-				   drm_order(chan->dma.ib_max + 1) << 16);
-	nv_wo32(chan->ramin, 0x60, 0x7fffffff);
-	nv_wo32(chan->ramin, 0x78, 0x00000000);
-	nv_wo32(chan->ramin, 0x7c, 0x30000001);
-	nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj->cinst >> 4));
-
-	dev_priv->engine.instmem.flush(dev);
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
-	nv50_fifo_playlist_update(dev);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
-	if (ret)
-		priv->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static bool
-nv50_fifo_kickoff(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	bool done = true;
-	u32 me;
-
-	/* HW bug workaround:
-	 *
-	 * PFIFO will hang forever if the connected engines don't report
-	 * that they've processed the context switch request.
-	 *
-	 * In order for the kickoff to work, we need to ensure all the
-	 * connected engines are in a state where they can answer.
-	 *
-	 * Newer chipsets don't seem to suffer from this issue, and well,
-	 * there's also an "ignore these engines" bitmask reg we can use
-	 * if we hit the issue there..
-	 */
-
-	/* PME: make sure engine is enabled */
-	me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
-
-	/* do the kickoff... */
-	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
-	if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
-		NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
-		done = false;
-	}
-
-	/* restore any engine states we changed, and exit */
-	nv_wr32(dev, 0x00b860, me);
-	return done;
-}
-
-static void
-nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv50_fifo_chan *fctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	/* remove channel from playlist, will context switch if active */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
-	nv50_fifo_playlist_update(dev);
-
-	/* tell any engines on this channel to unload their contexts */
-	nv50_fifo_kickoff(chan);
-
-	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* clean up */
-	if (chan->user) {
-		iounmap(chan->user);
-		chan->user = NULL;
-	}
-
-	atomic_dec(&chan->vm->engref[engine]);
-	chan->engctx[engine] = NULL;
-	kfree(fctx);
-}
-
-static int
-nv50_fifo_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 instance;
-	int i;
-
-	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(dev, 0x00250c, 0x6f3cfc34);
-	nv_wr32(dev, 0x002044, 0x01003fff);
-
-	nv_wr32(dev, 0x002100, 0xffffffff);
-	nv_wr32(dev, 0x002140, 0xffffffff);
-
-	for (i = 0; i < 128; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-		if (chan && chan->engctx[engine])
-			instance = 0x80000000 | chan->ramin->vinst >> 12;
-		else
-			instance = 0x00000000;
-		nv_wr32(dev, 0x002600 + (i * 4), instance);
-	}
-
-	nv50_fifo_playlist_update(dev);
-
-	nv_wr32(dev, 0x003200, 1);
-	nv_wr32(dev, 0x003250, 1);
-	nv_wr32(dev, 0x002500, 1);
-	return 0;
-}
-
-static int
-nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_fifo_priv *priv = nv_engine(dev, engine);
-	int i;
-
-	/* set playlist length to zero, fifo will unload context */
-	nv_wr32(dev, 0x0032ec, 0);
-
-	/* tell all connected engines to unload their contexts */
-	for (i = 0; i < priv->base.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-		if (chan && !nv50_fifo_kickoff(chan))
-			return -EBUSY;
-	}
-
-	nv_wr32(dev, 0x002140, 0);
-	return 0;
-}
-
-void
-nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 5);
-}
-
-void
-nv50_fifo_destroy(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_fifo_priv *priv = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 8);
-
-	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
-	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
-
-	dev_priv->eng[engine] = NULL;
-	kfree(priv);
-}
-
-int
-nv50_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_fifo_priv *priv;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.base.destroy = nv50_fifo_destroy;
-	priv->base.base.init = nv50_fifo_init;
-	priv->base.base.fini = nv50_fifo_fini;
-	priv->base.base.context_new = nv50_fifo_context_new;
-	priv->base.base.context_del = nv50_fifo_context_del;
-	priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
-	priv->base.channels = 127;
-	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
-	if (ret)
-		goto error;
-
-	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-error:
-	if (ret)
-		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
-	return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
deleted file mode 100644
index c86a5fcc5e69..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dmi.h>
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
-#include "nouveau_gpio.h"
-
-#include "nv50_display.h"
-
-static int
-nv50_gpio_location(int line, u32 *reg, u32 *shift)
-{
-	const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
-
-	if (line >= 32)
-		return -EINVAL;
-
-	*reg = nv50_gpio_reg[line >> 3];
-	*shift = (line & 7) << 2;
-	return 0;
-}
-
-int
-nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
-{
-	u32 reg, shift;
-
-	if (nv50_gpio_location(line, &reg, &shift))
-		return -EINVAL;
-
-	nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
-	return 0;
-}
-
-int
-nv50_gpio_sense(struct drm_device *dev, int line)
-{
-	u32 reg, shift;
-
-	if (nv50_gpio_location(line, &reg, &shift))
-		return -EINVAL;
-
-	return !!(nv_rd32(dev, reg) & (4 << shift));
-}
-
-void
-nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
-{
-	u32 reg  = line < 16 ? 0xe050 : 0xe070;
-	u32 mask = 0x00010001 << (line & 0xf);
-
-	nv_wr32(dev, reg + 4, mask);
-	nv_mask(dev, reg + 0, mask, on ? mask : 0);
-}
-
-int
-nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
-{
-	u32 data = ((dir ^ 1) << 13) | (out << 12);
-	nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
-	nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
-	return 0;
-}
-
-int
-nvd0_gpio_sense(struct drm_device *dev, int line)
-{
-	return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
-}
-
-static void
-nv50_gpio_isr(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 intr0, intr1 = 0;
-	u32 hi, lo;
-
-	intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
-	if (dev_priv->chipset >= 0x90)
-		intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-
-	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
-	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
-	nouveau_gpio_isr(dev, 0, hi | lo);
-
-	nv_wr32(dev, 0xe054, intr0);
-	if (dev_priv->chipset >= 0x90)
-		nv_wr32(dev, 0xe074, intr1);
-}
-
-static struct dmi_system_id gpio_reset_ids[] = {
-	{
-		.ident = "Apple Macbook 10,1",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
-		}
-	},
-	{ }
-};
-
-int
-nv50_gpio_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/* initialise gpios and routing to vbios defaults */
-	if (dmi_check_system(gpio_reset_ids))
-		nouveau_gpio_reset(dev);
-
-	/* disable, and ack any pending gpio interrupts */
-	nv_wr32(dev, 0xe050, 0x00000000);
-	nv_wr32(dev, 0xe054, 0xffffffff);
-	if (dev_priv->chipset >= 0x90) {
-		nv_wr32(dev, 0xe070, 0x00000000);
-		nv_wr32(dev, 0xe074, 0xffffffff);
-	}
-
-	nouveau_irq_register(dev, 21, nv50_gpio_isr);
-	return 0;
-}
-
-void
-nv50_gpio_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	nv_wr32(dev, 0xe050, 0x00000000);
-	if (dev_priv->chipset >= 0x90)
-		nv_wr32(dev, 0xe070, 0x00000000);
-	nouveau_irq_unregister(dev, 21);
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
deleted file mode 100644
index f8a9c8095297..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ /dev/null
@@ -1,867 +0,0 @@
-/*
- * Copyright (C) 2007 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-#include "nouveau_dma.h"
-#include "nouveau_vm.h"
-#include "nv50_evo.h"
-
-struct nv50_graph_engine {
-	struct nouveau_exec_engine base;
-	u32 ctxprog[512];
-	u32 ctxprog_size;
-	u32 grctx_size;
-};
-
-static int
-nv50_graph_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
-	u32 units = nv_rd32(dev, 0x001540);
-	int i;
-
-	NV_DEBUG(dev, "\n");
-
-	/* master reset */
-	nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
-	nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
-
-	/* reset/enable traps and interrupts */
-	nv_wr32(dev, 0x400804, 0xc0000000);
-	nv_wr32(dev, 0x406800, 0xc0000000);
-	nv_wr32(dev, 0x400c04, 0xc0000000);
-	nv_wr32(dev, 0x401800, 0xc0000000);
-	nv_wr32(dev, 0x405018, 0xc0000000);
-	nv_wr32(dev, 0x402000, 0xc0000000);
-	for (i = 0; i < 16; i++) {
-		if (!(units & (1 << i)))
-			continue;
-
-		if (dev_priv->chipset < 0xa0) {
-			nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
-			nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
-			nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
-		} else {
-			nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
-			nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
-			nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
-		}
-	}
-
-	nv_wr32(dev, 0x400108, 0xffffffff);
-	nv_wr32(dev, 0x400138, 0xffffffff);
-	nv_wr32(dev, 0x400100, 0xffffffff);
-	nv_wr32(dev, 0x40013c, 0xffffffff);
-	nv_wr32(dev, 0x400500, 0x00010001);
-
-	/* upload context program, initialise ctxctl defaults */
-	nv_wr32(dev, 0x400324, 0x00000000);
-	for (i = 0; i < pgraph->ctxprog_size; i++)
-		nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
-	nv_wr32(dev, 0x400824, 0x00000000);
-	nv_wr32(dev, 0x400828, 0x00000000);
-	nv_wr32(dev, 0x40082c, 0x00000000);
-	nv_wr32(dev, 0x400830, 0x00000000);
-	nv_wr32(dev, 0x400724, 0x00000000);
-	nv_wr32(dev, 0x40032c, 0x00000000);
-	nv_wr32(dev, 0x400320, 4);	/* CTXCTL_CMD = NEWCTXDMA */
-
-	/* some unknown zcull magic */
-	switch (dev_priv->chipset & 0xf0) {
-	case 0x50:
-	case 0x80:
-	case 0x90:
-		nv_wr32(dev, 0x402ca8, 0x00000800);
-		break;
-	case 0xa0:
-	default:
-		nv_wr32(dev, 0x402cc0, 0x00000000);
-		if (dev_priv->chipset == 0xa0 ||
-		    dev_priv->chipset == 0xaa ||
-		    dev_priv->chipset == 0xac) {
-			nv_wr32(dev, 0x402ca8, 0x00000802);
-		} else {
-			nv_wr32(dev, 0x402cc0, 0x00000000);
-			nv_wr32(dev, 0x402ca8, 0x00000002);
-		}
-
-		break;
-	}
-
-	/* zero out zcull regions */
-	for (i = 0; i < 8; i++) {
-		nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
-		nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
-		nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
-		nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
-	}
-
-	return 0;
-}
-
-static int
-nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_wr32(dev, 0x40013c, 0x00000000);
-	return 0;
-}
-
-static int
-nv50_graph_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_gpuobj *grctx = NULL;
-	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
-	int hdr, ret;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
-				 NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &grctx);
-	if (ret)
-		return ret;
-
-	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-	nv_wo32(ramin, hdr + 0x00, 0x00190002);
-	nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
-	nv_wo32(ramin, hdr + 0x08, grctx->vinst);
-	nv_wo32(ramin, hdr + 0x0c, 0);
-	nv_wo32(ramin, hdr + 0x10, 0);
-	nv_wo32(ramin, hdr + 0x14, 0x00010000);
-
-	nv50_grctx_fill(dev, grctx);
-	nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
-
-	dev_priv->engine.instmem.flush(dev);
-
-	atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
-	chan->engctx[NVOBJ_ENGINE_GR] = grctx;
-	return 0;
-}
-
-static void
-nv50_graph_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nouveau_gpuobj *grctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-
-	for (i = hdr; i < hdr + 24; i += 4)
-		nv_wo32(chan->ramin, i, 0);
-	dev_priv->engine.instmem.flush(dev);
-
-	atomic_dec(&chan->vm->engref[engine]);
-	nouveau_gpuobj_ref(NULL, &grctx);
-	chan->engctx[engine] = NULL;
-}
-
-static int
-nv50_graph_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 1;
-	obj->class  = class;
-
-	nv_wo32(obj, 0x00, class);
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	dev_priv->engine.instmem.flush(dev);
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static void
-nv50_graph_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 0);
-}
-
-static void
-nv84_graph_tlb_flush(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-	bool idle, timeout = false;
-	unsigned long flags;
-	u64 start;
-	u32 tmp;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
-
-	start = ptimer->read(dev);
-	do {
-		idle = true;
-
-		for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-
-		for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-
-		for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-	} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
-
-	if (timeout) {
-		NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
-			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
-			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
-	}
-
-	nv50_vm_flush_engine(dev, 0);
-
-	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-}
-
-static struct nouveau_enum nv50_mp_exec_error_names[] = {
-	{ 3, "STACK_UNDERFLOW", NULL },
-	{ 4, "QUADON_ACTIVE", NULL },
-	{ 8, "TIMEOUT", NULL },
-	{ 0x10, "INVALID_OPCODE", NULL },
-	{ 0x40, "BREAKPOINT", NULL },
-	{}
-};
-
-static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
-	{ 0x00000001, "NOTIFY" },
-	{ 0x00000002, "IN" },
-	{ 0x00000004, "OUT" },
-	{}
-};
-
-static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
-	{ 0x00000001, "FAULT" },
-	{}
-};
-
-static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
-	{ 0x00000001, "FAULT" },
-	{}
-};
-
-static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
-	{ 0x00000001, "FAULT" },
-	{}
-};
-
-/* There must be a *lot* of these. Will take some time to gather them up. */
-struct nouveau_enum nv50_data_error_names[] = {
-	{ 0x00000003, "INVALID_OPERATION", NULL },
-	{ 0x00000004, "INVALID_VALUE", NULL },
-	{ 0x00000005, "INVALID_ENUM", NULL },
-	{ 0x00000008, "INVALID_OBJECT", NULL },
-	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
-	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
-	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
-	{ 0x0000000c, "INVALID_BITFIELD", NULL },
-	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
-	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
-	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
-	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
-	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
-	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
-	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
-	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
-	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
-	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
-	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
-	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
-	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
-	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
-	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
-	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
-	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
-	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
-	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
-	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
-	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
-	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
-	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
-	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
-	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
-	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
-	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
-	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
-	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
-	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
-	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
-	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
-	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
-	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
-	{}
-};
-
-static struct nouveau_bitfield nv50_graph_intr[] = {
-	{ 0x00000001, "NOTIFY" },
-	{ 0x00000002, "COMPUTE_QUERY" },
-	{ 0x00000010, "ILLEGAL_MTHD" },
-	{ 0x00000020, "ILLEGAL_CLASS" },
-	{ 0x00000040, "DOUBLE_NOTIFY" },
-	{ 0x00001000, "CONTEXT_SWITCH" },
-	{ 0x00010000, "BUFFER_NOTIFY" },
-	{ 0x00100000, "DATA_ERROR" },
-	{ 0x00200000, "TRAP" },
-	{ 0x01000000, "SINGLE_STEP" },
-	{}
-};
-
-static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t units = nv_rd32(dev, 0x1540);
-	uint32_t addr, mp10, status, pc, oplow, ophigh;
-	int i;
-	int mps = 0;
-	for (i = 0; i < 4; i++) {
-		if (!(units & 1 << (i+24)))
-			continue;
-		if (dev_priv->chipset < 0xa0)
-			addr = 0x408200 + (tpid << 12) + (i << 7);
-		else
-			addr = 0x408100 + (tpid << 11) + (i << 7);
-		mp10 = nv_rd32(dev, addr + 0x10);
-		status = nv_rd32(dev, addr + 0x14);
-		if (!status)
-			continue;
-		if (display) {
-			nv_rd32(dev, addr + 0x20);
-			pc = nv_rd32(dev, addr + 0x24);
-			oplow = nv_rd32(dev, addr + 0x70);
-			ophigh = nv_rd32(dev, addr + 0x74);
-			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
-					"TP %d MP %d: ", tpid, i);
-			nouveau_enum_print(nv50_mp_exec_error_names, status);
-			printk(" at %06x warp %d, opcode %08x %08x\n",
-					pc&0xffffff, pc >> 24,
-					oplow, ophigh);
-		}
-		nv_wr32(dev, addr + 0x10, mp10);
-		nv_wr32(dev, addr + 0x14, 0);
-		mps++;
-	}
-	if (!mps && display)
-		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
-				"No MPs claiming errors?\n", tpid);
-}
-
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
-		uint32_t ustatus_new, int display, const char *name)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int tps = 0;
-	uint32_t units = nv_rd32(dev, 0x1540);
-	int i, r;
-	uint32_t ustatus_addr, ustatus;
-	for (i = 0; i < 16; i++) {
-		if (!(units & (1 << i)))
-			continue;
-		if (dev_priv->chipset < 0xa0)
-			ustatus_addr = ustatus_old + (i << 12);
-		else
-			ustatus_addr = ustatus_new + (i << 11);
-		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
-		if (!ustatus)
-			continue;
-		tps++;
-		switch (type) {
-		case 6: /* texture error... unknown for now */
-			if (display) {
-				NV_ERROR(dev, "magic set %d:\n", i);
-				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
-					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-						nv_rd32(dev, r));
-			}
-			break;
-		case 7: /* MP error */
-			if (ustatus & 0x04030000) {
-				nv50_pgraph_mp_trap(dev, i, display);
-				ustatus &= ~0x04030000;
-			}
-			break;
-		case 8: /* TPDMA error */
-			{
-			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
-			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
-			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
-			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
-			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
-			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
-			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
-			/* 2d engine destination */
-			if (ustatus & 0x00000010) {
-				if (display) {
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000010;
-			}
-			/* Render target */
-			if (ustatus & 0x00000040) {
-				if (display) {
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000040;
-			}
-			/* CUDA memory: l[], g[] or stack. */
-			if (ustatus & 0x00000080) {
-				if (display) {
-					if (e18 & 0x80000000) {
-						/* g[] read fault? */
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 24) & 0x1f));
-						e18 &= ~0x1f000000;
-					} else if (e18 & 0xc) {
-						/* g[] write fault? */
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 7) & 0x1f));
-						e18 &= ~0x00000f80;
-					} else {
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
-								i, e14, e10);
-					}
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000080;
-			}
-			}
-			break;
-		}
-		if (ustatus) {
-			if (display)
-				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
-		}
-		nv_wr32(dev, ustatus_addr, 0xc0000000);
-	}
-
-	if (!tps && display)
-		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
-}
-
-static int
-nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
-{
-	u32 status = nv_rd32(dev, 0x400108);
-	u32 ustatus;
-
-	if (!status && display) {
-		NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
-		return 1;
-	}
-
-	/* DISPATCH: Relays commands to other units and handles NOTIFY,
-	 * COND, QUERY. If you get a trap from it, the command is still stuck
-	 * in DISPATCH and you need to do something about it. */
-	if (status & 0x001) {
-		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
-		}
-
-		nv_wr32(dev, 0x400500, 0x00000000);
-
-		/* Known to be triggered by screwed up NOTIFY and COND... */
-		if (ustatus & 0x00000001) {
-			u32 addr = nv_rd32(dev, 0x400808);
-			u32 subc = (addr & 0x00070000) >> 16;
-			u32 mthd = (addr & 0x00001ffc);
-			u32 datal = nv_rd32(dev, 0x40080c);
-			u32 datah = nv_rd32(dev, 0x400810);
-			u32 class = nv_rd32(dev, 0x400814);
-			u32 r848 = nv_rd32(dev, 0x400848);
-
-			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
-			if (display && (addr & 0x80000000)) {
-				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
-					     "subc %d class 0x%04x mthd 0x%04x "
-					     "data 0x%08x%08x "
-					     "400808 0x%08x 400848 0x%08x\n",
-					chid, inst, subc, class, mthd, datah,
-					datal, addr, r848);
-			} else
-			if (display) {
-				NV_INFO(dev, "PGRAPH - no stuck command?\n");
-			}
-
-			nv_wr32(dev, 0x400808, 0);
-			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
-			nv_wr32(dev, 0x400848, 0);
-			ustatus &= ~0x00000001;
-		}
-
-		if (ustatus & 0x00000002) {
-			u32 addr = nv_rd32(dev, 0x40084c);
-			u32 subc = (addr & 0x00070000) >> 16;
-			u32 mthd = (addr & 0x00001ffc);
-			u32 data = nv_rd32(dev, 0x40085c);
-			u32 class = nv_rd32(dev, 0x400814);
-
-			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
-			if (display && (addr & 0x80000000)) {
-				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
-					     "subc %d class 0x%04x mthd 0x%04x "
-					     "data 0x%08x 40084c 0x%08x\n",
-					chid, inst, subc, class, mthd,
-					data, addr);
-			} else
-			if (display) {
-				NV_INFO(dev, "PGRAPH - no stuck command?\n");
-			}
-
-			nv_wr32(dev, 0x40084c, 0);
-			ustatus &= ~0x00000002;
-		}
-
-		if (ustatus && display) {
-			NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
-				      "0x%08x)\n", ustatus);
-		}
-
-		nv_wr32(dev, 0x400804, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x001);
-		status &= ~0x001;
-		if (!status)
-			return 0;
-	}
-
-	/* M2MF: Memory to memory copy engine. */
-	if (status & 0x002) {
-		u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
-		if (display) {
-			NV_INFO(dev, "PGRAPH - TRAP_M2MF");
-			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
-				nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
-				nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
-
-		}
-
-		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(dev, 0x400040, 2);
-		nv_wr32(dev, 0x400040, 0);
-		nv_wr32(dev, 0x406800, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x002);
-		status &= ~0x002;
-	}
-
-	/* VFETCH: Fetches data from vertex buffers. */
-	if (status & 0x004) {
-		u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
-		if (display) {
-			NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
-			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
-				nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
-				nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
-		}
-
-		nv_wr32(dev, 0x400c04, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x004);
-		status &= ~0x004;
-	}
-
-	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
-	if (status & 0x008) {
-		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
-		if (display) {
-			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
-			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
-				nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
-				nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
-
-		}
-
-		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(dev, 0x400040, 0x80);
-		nv_wr32(dev, 0x400040, 0);
-		nv_wr32(dev, 0x401800, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x008);
-		status &= ~0x008;
-	}
-
-	/* CCACHE: Handles code and c[] caches and fills them. */
-	if (status & 0x010) {
-		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
-		if (display) {
-			NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
-			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
-				     " %08x %08x %08x\n",
-				nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
-				nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
-				nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
-				nv_rd32(dev, 0x40501c));
-
-		}
-
-		nv_wr32(dev, 0x405018, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x010);
-		status &= ~0x010;
-	}
-
-	/* Unknown, not seen yet... 0x402000 is the only trap status reg
-	 * remaining, so try to handle it anyway. Perhaps related to that
-	 * unknown DMA slot on tesla? */
-	if (status & 0x20) {
-		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
-		if (display)
-			NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x402000, 0xc0000000);
-		/* no status modification on purpose */
-	}
-
-	/* TEXTURE: CUDA texturing units */
-	if (status & 0x040) {
-		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
-				    "PGRAPH - TRAP_TEXTURE");
-		nv_wr32(dev, 0x400108, 0x040);
-		status &= ~0x040;
-	}
-
-	/* MP: CUDA execution engines. */
-	if (status & 0x080) {
-		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
-				    "PGRAPH - TRAP_MP");
-		nv_wr32(dev, 0x400108, 0x080);
-		status &= ~0x080;
-	}
-
-	/* TPDMA:  Handles TP-initiated uncached memory accesses:
-	 * l[], g[], stack, 2d surfaces, render targets. */
-	if (status & 0x100) {
-		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
-				    "PGRAPH - TRAP_TPDMA");
-		nv_wr32(dev, 0x400108, 0x100);
-		status &= ~0x100;
-	}
-
-	if (status) {
-		if (display)
-			NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
-		nv_wr32(dev, 0x400108, status);
-	}
-
-	return 1;
-}
-
-int
-nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < pfifo->channels; i++) {
-		chan = dev_priv->channels.ptr[i];
-		if (!chan || !chan->ramin)
-			continue;
-
-		if (inst == chan->ramin->vinst)
-			break;
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return i;
-}
-
-static void
-nv50_graph_isr(struct drm_device *dev)
-{
-	u32 stat;
-
-	while ((stat = nv_rd32(dev, 0x400100))) {
-		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
-		u32 chid = nv50_graph_isr_chid(dev, inst);
-		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-		u32 subc = (addr & 0x00070000) >> 16;
-		u32 mthd = (addr & 0x00001ffc);
-		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-		u32 class = nv_rd32(dev, 0x400814);
-		u32 show = stat;
-
-		if (stat & 0x00000010) {
-			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
-						       mthd, data))
-				show &= ~0x00000010;
-		}
-
-		show = (show && nouveau_ratelimit()) ? show : 0;
-
-		if (show & 0x00100000) {
-			u32 ecode = nv_rd32(dev, 0x400110);
-			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
-			nouveau_enum_print(nv50_data_error_names, ecode);
-			printk("\n");
-		}
-
-		if (stat & 0x00200000) {
-			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
-				show &= ~0x00200000;
-		}
-
-		nv_wr32(dev, 0x400100, stat);
-		nv_wr32(dev, 0x400500, 0x00010001);
-
-		if (show) {
-			NV_INFO(dev, "PGRAPH -");
-			nouveau_bitfield_print(nv50_graph_intr, show);
-			printk("\n");
-			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
-				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
-				chid, inst, subc, class, mthd, data);
-			nv50_fb_vm_trap(dev, 1);
-		}
-	}
-
-	if (nv_rd32(dev, 0x400824) & (1 << 31))
-		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
-}
-
-static void
-nv50_graph_destroy(struct drm_device *dev, int engine)
-{
-	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, GR);
-
-	nouveau_irq_unregister(dev, 12);
-	kfree(pgraph);
-}
-
-int
-nv50_graph_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_graph_engine *pgraph;
-	int ret;
-
-	pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
-	if (!pgraph)
-		return -ENOMEM;
-
-	ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
-				  &pgraph->ctxprog_size,
-				  &pgraph->grctx_size);
-	if (ret) {
-		NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
-		kfree(pgraph);
-		return 0;
-	}
-
-	pgraph->base.destroy = nv50_graph_destroy;
-	pgraph->base.init = nv50_graph_init;
-	pgraph->base.fini = nv50_graph_fini;
-	pgraph->base.context_new = nv50_graph_context_new;
-	pgraph->base.context_del = nv50_graph_context_del;
-	pgraph->base.object_new = nv50_graph_object_new;
-	if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
-		pgraph->base.tlb_flush = nv50_graph_tlb_flush;
-	else
-		pgraph->base.tlb_flush = nv84_graph_tlb_flush;
-
-	nouveau_irq_register(dev, 12, nv50_graph_isr);
-
-	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
-	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
-	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
-
-	/* tesla */
-	if (dev_priv->chipset == 0x50)
-		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
-	else
-	if (dev_priv->chipset < 0xa0)
-		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
-	else {
-		switch (dev_priv->chipset) {
-		case 0xa0:
-		case 0xaa:
-		case 0xac:
-			NVOBJ_CLASS(dev, 0x8397, GR);
-			break;
-		case 0xa3:
-		case 0xa5:
-		case 0xa8:
-			NVOBJ_CLASS(dev, 0x8597, GR);
-			break;
-		case 0xaf:
-			NVOBJ_CLASS(dev, 0x8697, GR);
-			break;
-		}
-	}
-
-	/* compute */
-	NVOBJ_CLASS(dev, 0x50c0, GR);
-	if (dev_priv->chipset  > 0xa0 &&
-	    dev_priv->chipset != 0xaa &&
-	    dev_priv->chipset != 0xac)
-		NVOBJ_CLASS(dev, 0x85c0, GR);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
deleted file mode 100644
index 05eff577f053..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Copyright (C) 2007 Ben Skeggs.
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
-
-#define BAR1_VM_BASE 0x0020000000ULL
-#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
-#define BAR3_VM_BASE 0x0000000000ULL
-#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
-
-struct nv50_instmem_priv {
-	uint32_t save1700[5]; /* 0x1700->0x1710 */
-
-	struct nouveau_gpuobj *bar1_dmaobj;
-	struct nouveau_gpuobj *bar3_dmaobj;
-};
-
-static void
-nv50_channel_del(struct nouveau_channel **pchan)
-{
-	struct nouveau_channel *chan;
-
-	chan = *pchan;
-	*pchan = NULL;
-	if (!chan)
-		return;
-
-	nouveau_gpuobj_ref(NULL, &chan->ramfc);
-	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	if (drm_mm_initialized(&chan->ramin_heap))
-		drm_mm_takedown(&chan->ramin_heap);
-	nouveau_gpuobj_ref(NULL, &chan->ramin);
-	kfree(chan);
-}
-
-static int
-nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
-		 struct nouveau_channel **pchan)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
-	u32  fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
-	struct nouveau_channel *chan;
-	int ret, i;
-
-	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
-	if (!chan)
-		return -ENOMEM;
-	chan->dev = dev;
-
-	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
-	if (ret) {
-		nv50_channel_del(&chan);
-		return ret;
-	}
-
-	ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
-	if (ret) {
-		nv50_channel_del(&chan);
-		return ret;
-	}
-
-	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
-				      chan->ramin->pinst + pgd,
-				      chan->ramin->vinst + pgd,
-				      0x4000, NVOBJ_FLAG_ZERO_ALLOC,
-				      &chan->vm_pd);
-	if (ret) {
-		nv50_channel_del(&chan);
-		return ret;
-	}
-
-	for (i = 0; i < 0x4000; i += 8) {
-		nv_wo32(chan->vm_pd, i + 0, 0x00000000);
-		nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
-	}
-
-	ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
-	if (ret) {
-		nv50_channel_del(&chan);
-		return ret;
-	}
-
-	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
-				      chan->ramin->pinst + fc,
-				      chan->ramin->vinst + fc, 0x100,
-				      NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
-	if (ret) {
-		nv50_channel_del(&chan);
-		return ret;
-	}
-
-	*pchan = chan;
-	return 0;
-}
-
-int
-nv50_instmem_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv;
-	struct nouveau_channel *chan;
-	struct nouveau_vm *vm;
-	int ret, i;
-	u32 tmp;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	dev_priv->engine.instmem.priv = priv;
-
-	/* Save state, will restore at takedown. */
-	for (i = 0x1700; i <= 0x1710; i += 4)
-		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
-
-	/* Global PRAMIN heap */
-	ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
-	if (ret) {
-		NV_ERROR(dev, "Failed to init RAMIN heap\n");
-		goto error;
-	}
-
-	/* BAR3 */
-	ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
-			     &dev_priv->bar3_vm);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
-				 0x1000, NVOBJ_FLAG_DONT_MAP |
-				 NVOBJ_FLAG_ZERO_ALLOC,
-				 &dev_priv->bar3_vm->pgt[0].obj[0]);
-	if (ret)
-		goto error;
-	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
-
-	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
-
-	ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
-	if (ret)
-		goto error;
-	dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
-
-	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
-				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
-				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
-				  &priv->bar3_dmaobj);
-	if (ret)
-		goto error;
-
-	nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
-	nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
-	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->ramin_available = true;
-
-	tmp = nv_ro32(chan->ramin, 0);
-	nv_wo32(chan->ramin, 0, ~tmp);
-	if (nv_ro32(chan->ramin, 0) != ~tmp) {
-		NV_ERROR(dev, "PRAMIN readback failed\n");
-		ret = -EIO;
-		goto error;
-	}
-	nv_wo32(chan->ramin, 0, tmp);
-
-	/* BAR1 */
-	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
-	if (ret)
-		goto error;
-
-	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
-	if (ret)
-		goto error;
-	nouveau_vm_ref(NULL, &vm, NULL);
-
-	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
-				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
-				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
-				  &priv->bar1_dmaobj);
-	if (ret)
-		goto error;
-
-	nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
-	for (i = 0; i < 8; i++)
-		nv_wr32(dev, 0x1900 + (i*4), 0);
-
-	/* Create shared channel VM, space is reserved at the beginning
-	 * to catch "NULL pointer" references
-	 */
-	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
-			     &dev_priv->chan_vm);
-	if (ret)
-		return ret;
-
-	return 0;
-
-error:
-	nv50_instmem_takedown(dev);
-	return ret;
-}
-
-void
-nv50_instmem_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
-	int i;
-
-	NV_DEBUG(dev, "\n");
-
-	if (!priv)
-		return;
-
-	dev_priv->ramin_available = false;
-
-	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
-
-	for (i = 0x1700; i <= 0x1710; i += 4)
-		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
-
-	nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
-	nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
-
-	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
-	dev_priv->channels.ptr[127] = 0;
-	nv50_channel_del(&dev_priv->channels.ptr[0]);
-
-	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
-	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
-
-	if (drm_mm_initialized(&dev_priv->ramin_heap))
-		drm_mm_takedown(&dev_priv->ramin_heap);
-
-	dev_priv->engine.instmem.priv = NULL;
-	kfree(priv);
-}
-
-int
-nv50_instmem_suspend(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	dev_priv->ramin_available = false;
-	return 0;
-}
-
-void
-nv50_instmem_resume(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
-	int i;
-
-	/* Poke the relevant regs, and pray it works :) */
-	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
-	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
-	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
-					 NV50_PUNK_BAR_CFG_BASE_VALID);
-	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
-					NV50_PUNK_BAR1_CTXDMA_VALID);
-	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
-					NV50_PUNK_BAR3_CTXDMA_VALID);
-
-	for (i = 0; i < 8; i++)
-		nv_wr32(dev, 0x1900 + (i*4), 0);
-
-	dev_priv->ramin_available = true;
-}
-
-struct nv50_gpuobj_node {
-	struct nouveau_mem *vram;
-	struct nouveau_vma chan_vma;
-	u32 align;
-};
-
-int
-nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
-		 u32 size, u32 align)
-{
-	struct drm_device *dev = gpuobj->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	struct nv50_gpuobj_node *node = NULL;
-	int ret;
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
-	node->align = align;
-
-	size  = (size + 4095) & ~4095;
-	align = max(align, (u32)4096);
-
-	ret = vram->get(dev, size, align, 0, 0, &node->vram);
-	if (ret) {
-		kfree(node);
-		return ret;
-	}
-
-	gpuobj->vinst = node->vram->offset;
-
-	if (gpuobj->flags & NVOBJ_FLAG_VM) {
-		u32 flags = NV_MEM_ACCESS_RW;
-		if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
-			flags |= NV_MEM_ACCESS_SYS;
-
-		ret = nouveau_vm_get(chan->vm, size, 12, flags,
-				     &node->chan_vma);
-		if (ret) {
-			vram->put(dev, &node->vram);
-			kfree(node);
-			return ret;
-		}
-
-		nouveau_vm_map(&node->chan_vma, node->vram);
-		gpuobj->linst = node->chan_vma.offset;
-	}
-
-	gpuobj->size = size;
-	gpuobj->node = node;
-	return 0;
-}
-
-void
-nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_device *dev = gpuobj->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	struct nv50_gpuobj_node *node;
-
-	node = gpuobj->node;
-	gpuobj->node = NULL;
-
-	if (node->chan_vma.node) {
-		nouveau_vm_unmap(&node->chan_vma);
-		nouveau_vm_put(&node->chan_vma);
-	}
-	vram->put(dev, &node->vram);
-	kfree(node);
-}
-
-int
-nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
-	struct nv50_gpuobj_node *node = gpuobj->node;
-	int ret;
-
-	ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
-			     NV_MEM_ACCESS_RW, &node->vram->bar_vma);
-	if (ret)
-		return ret;
-
-	nouveau_vm_map(&node->vram->bar_vma, node->vram);
-	gpuobj->pinst = node->vram->bar_vma.offset;
-	return 0;
-}
-
-void
-nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
-{
-	struct nv50_gpuobj_node *node = gpuobj->node;
-
-	if (node->vram->bar_vma.node) {
-		nouveau_vm_unmap(&node->vram->bar_vma);
-		nouveau_vm_put(&node->vram->bar_vma);
-	}
-}
-
-void
-nv50_instmem_flush(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->vm_lock, flags);
-	nv_wr32(dev, 0x00330c, 0x00000001);
-	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
-		NV_ERROR(dev, "PRAMIN flush timeout\n");
-	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
-}
-
-void
-nv84_instmem_flush(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->vm_lock, flags);
-	nv_wr32(dev, 0x070000, 0x00000001);
-	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
-		NV_ERROR(dev, "PRAMIN flush timeout\n");
-	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
deleted file mode 100644
index a739c2afae90..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_mc.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2007 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-
-int
-nv50_mc_init(struct drm_device *dev)
-{
-	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
-	return 0;
-}
-
-void nv50_mc_takedown(struct drm_device *dev)
-{
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
deleted file mode 100644
index e11bb540727b..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-
-struct nv50_mpeg_engine {
-	struct nouveau_exec_engine base;
-};
-
-static inline u32
-CTX_PTR(struct drm_device *dev, u32 offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->chipset == 0x50)
-		offset += 0x0260;
-	else
-		offset += 0x0060;
-
-	return offset;
-}
-
-static int
-nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_gpuobj *ctx = NULL;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &ctx);
-	if (ret)
-		return ret;
-
-	nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
-	nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
-	nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
-	nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
-	nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
-	nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
-
-	nv_wo32(ctx, 0x70, 0x00801ec1);
-	nv_wo32(ctx, 0x7c, 0x0000037c);
-	dev_priv->engine.instmem.flush(dev);
-
-	chan->engctx[engine] = ctx;
-	return 0;
-}
-
-static void
-nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	int i;
-
-	for (i = 0x00; i <= 0x14; i += 4)
-		nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
-
-	nouveau_gpuobj_ref(NULL, &ctx);
-	chan->engctx[engine] = NULL;
-}
-
-static int
-nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
-		     u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 2;
-	obj->class  = class;
-
-	nv_wo32(obj, 0x00, class);
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	dev_priv->engine.instmem.flush(dev);
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static void
-nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 0x08);
-}
-
-static int
-nv50_mpeg_init(struct drm_device *dev, int engine)
-{
-	nv_wr32(dev, 0x00b32c, 0x00000000);
-	nv_wr32(dev, 0x00b314, 0x00000100);
-	nv_wr32(dev, 0x00b0e0, 0x0000001a);
-
-	nv_wr32(dev, 0x00b220, 0x00000044);
-	nv_wr32(dev, 0x00b300, 0x00801ec1);
-	nv_wr32(dev, 0x00b390, 0x00000000);
-	nv_wr32(dev, 0x00b394, 0x00000000);
-	nv_wr32(dev, 0x00b398, 0x00000000);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-
-	nv_wr32(dev, 0x00b100, 0xffffffff);
-	nv_wr32(dev, 0x00b140, 0xffffffff);
-
-	if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
-		NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x00b140, 0x00000000);
-	return 0;
-}
-
-static void
-nv50_mpeg_isr(struct drm_device *dev)
-{
-	u32 stat = nv_rd32(dev, 0x00b100);
-	u32 type = nv_rd32(dev, 0x00b230);
-	u32 mthd = nv_rd32(dev, 0x00b234);
-	u32 data = nv_rd32(dev, 0x00b238);
-	u32 show = stat;
-
-	if (stat & 0x01000000) {
-		/* happens on initial binding of the object */
-		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_wr32(dev, 0x00b308, 0x00000100);
-			show &= ~0x01000000;
-		}
-	}
-
-	if (show && nouveau_ratelimit()) {
-		NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			stat, type, mthd, data);
-	}
-
-	nv_wr32(dev, 0x00b100, stat);
-	nv_wr32(dev, 0x00b230, 0x00000001);
-	nv50_fb_vm_trap(dev, 1);
-}
-
-static void
-nv50_vpe_isr(struct drm_device *dev)
-{
-	if (nv_rd32(dev, 0x00b100))
-		nv50_mpeg_isr(dev);
-
-	if (nv_rd32(dev, 0x00b800)) {
-		u32 stat = nv_rd32(dev, 0x00b800);
-		NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
-		nv_wr32(dev, 0xb800, stat);
-	}
-}
-
-static void
-nv50_mpeg_destroy(struct drm_device *dev, int engine)
-{
-	struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 0);
-
-	NVOBJ_ENGINE_DEL(dev, MPEG);
-	kfree(pmpeg);
-}
-
-int
-nv50_mpeg_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_mpeg_engine *pmpeg;
-
-	pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
-	if (!pmpeg)
-		return -ENOMEM;
-
-	pmpeg->base.destroy = nv50_mpeg_destroy;
-	pmpeg->base.init = nv50_mpeg_init;
-	pmpeg->base.fini = nv50_mpeg_fini;
-	pmpeg->base.context_new = nv50_mpeg_context_new;
-	pmpeg->base.context_del = nv50_mpeg_context_del;
-	pmpeg->base.object_new = nv50_mpeg_object_new;
-	pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
-
-	if (dev_priv->chipset == 0x50) {
-		nouveau_irq_register(dev, 0, nv50_vpe_isr);
-		NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
-		NVOBJ_CLASS(dev, 0x3174, MPEG);
-#if 0
-		NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
-		NVOBJ_CLASS(dev, 0x4075, ME);
-#endif
-	} else {
-		nouveau_irq_register(dev, 0, nv50_mpeg_isr);
-		NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
-		NVOBJ_CLASS(dev, 0x8274, MPEG);
-	}
-
-	return 0;
-
-}
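
The init paths in the deleted engine code above lean on the nv_wait() helper, which polls a register until the masked value matches and reports failure on timeout (see the -EBUSY path in nv50_mpeg_init()). A rough model of that idiom, assuming the usual kernel delay helpers and with a purely illustrative loop bound standing in for the timer-subdev-driven deadline:

    /* Approximation of nv_wait(dev, reg, mask, value): true once the masked
     * read matches, false after a timeout.  The bound and delay here are
     * placeholders for the real, timer-based implementation. */
    static bool nv_wait_sketch(struct drm_device *dev, u32 reg, u32 mask, u32 value)
    {
    	int i;

    	for (i = 0; i < 100000; i++) {
    		if ((nv_rd32(dev, reg) & mask) == value)
    			return true;
    		udelay(1);
    	}
    	return false;	/* callers log the register and bail, e.g. with -EBUSY */
    }
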
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 07593fd73af3..c4a65039b1ca 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -23,13 +23,19 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_hw.h"
 #include "nouveau_pm.h"
 #include "nouveau_hwsq.h"
+
 #include "nv50_display.h"
 
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
 enum clk_src {
 	clk_src_crystal,
 	clk_src_href,
@@ -49,19 +55,20 @@ static u32 read_clk(struct drm_device *, enum clk_src);
 static u32
 read_div(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	switch (dev_priv->chipset) {
+	switch (nv_device(drm->device)->chipset) {
 	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
 	case 0x84:
 	case 0x86:
 	case 0x98:
 	case 0xa0:
-		return nv_rd32(dev, 0x004700);
+		return nv_rd32(device, 0x004700);
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		return nv_rd32(dev, 0x004800);
+		return nv_rd32(device, 0x004800);
 	default:
 		return 0x00000000;
 	}
@@ -70,12 +77,13 @@ read_div(struct drm_device *dev)
 static u32
 read_pll_src(struct drm_device *dev, u32 base)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 coef, ref = read_clk(dev, clk_src_crystal);
-	u32 rsel = nv_rd32(dev, 0x00e18c);
+	u32 rsel = nv_rd32(device, 0x00e18c);
 	int P, N, M, id;
 
-	switch (dev_priv->chipset) {
+	switch (nv_device(drm->device)->chipset) {
 	case 0x50:
 	case 0xa0:
 		switch (base) {
@@ -84,11 +92,11 @@ read_pll_src(struct drm_device *dev, u32 base)
 		case 0x4008: id = !!(rsel & 0x00000008); break;
 		case 0x4030: id = 0; break;
 		default:
-			NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
 			return 0;
 		}
 
-		coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
+		coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
 		ref *=  (coef & 0x01000000) ? 2 : 4;
 		P    =  (coef & 0x00070000) >> 16;
 		N    = ((coef & 0x0000ff00) >> 8) + 1;
@@ -97,7 +105,7 @@ read_pll_src(struct drm_device *dev, u32 base)
 	case 0x84:
 	case 0x86:
 	case 0x92:
-		coef = nv_rd32(dev, 0x00e81c);
+		coef = nv_rd32(device, 0x00e81c);
 		P    = (coef & 0x00070000) >> 16;
 		N    = (coef & 0x0000ff00) >> 8;
 		M    = (coef & 0x000000ff) >> 0;
@@ -105,14 +113,14 @@ read_pll_src(struct drm_device *dev, u32 base)
 	case 0x94:
 	case 0x96:
 	case 0x98:
-		rsel = nv_rd32(dev, 0x00c050);
+		rsel = nv_rd32(device, 0x00c050);
 		switch (base) {
 		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
 		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
 		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
 		case 0x4030: rsel = 3; break;
 		default:
-			NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
 			return 0;
 		}
 
@@ -123,8 +131,8 @@ read_pll_src(struct drm_device *dev, u32 base)
 		case 3: id = 0; break;
 		}
 
-		coef =  nv_rd32(dev, 0x00e81c + (id * 0x28));
-		P    = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		coef =  nv_rd32(device, 0x00e81c + (id * 0x28));
+		P    = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
 		P   += (coef & 0x00070000) >> 16;
 		N    = (coef & 0x0000ff00) >> 8;
 		M    = (coef & 0x000000ff) >> 0;
@@ -141,7 +149,9 @@ read_pll_src(struct drm_device *dev, u32 base)
 static u32
 read_pll_ref(struct drm_device *dev, u32 base)
 {
-	u32 src, mast = nv_rd32(dev, 0x00c040);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 src, mast = nv_rd32(device, 0x00c040);
 
 	switch (base) {
 	case 0x004028:
@@ -159,7 +169,7 @@ read_pll_ref(struct drm_device *dev, u32 base)
 	case 0x00e810:
 		return read_clk(dev, clk_src_crystal);
 	default:
-		NV_ERROR(dev, "bad pll 0x%06x\n", base);
+		NV_ERROR(drm, "bad pll 0x%06x\n", base);
 		return 0;
 	}
 
@@ -171,17 +181,18 @@ read_pll_ref(struct drm_device *dev, u32 base)
 static u32
 read_pll(struct drm_device *dev, u32 base)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 mast = nv_rd32(dev, 0x00c040);
-	u32 ctrl = nv_rd32(dev, base + 0);
-	u32 coef = nv_rd32(dev, base + 4);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 mast = nv_rd32(device, 0x00c040);
+	u32 ctrl = nv_rd32(device, base + 0);
+	u32 coef = nv_rd32(device, base + 4);
 	u32 ref = read_pll_ref(dev, base);
 	u32 clk = 0;
 	int N1, N2, M1, M2;
 
 	if (base == 0x004028 && (mast & 0x00100000)) {
 		/* wtf, appears to only disable post-divider on nva0 */
-		if (dev_priv->chipset != 0xa0)
+		if (nv_device(drm->device)->chipset != 0xa0)
 			return read_clk(dev, clk_src_dom6);
 	}
 
@@ -205,13 +216,14 @@ read_pll(struct drm_device *dev, u32 base)
 static u32
 read_clk(struct drm_device *dev, enum clk_src src)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 mast = nv_rd32(dev, 0x00c040);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 mast = nv_rd32(device, 0x00c040);
 	u32 P = 0;
 
 	switch (src) {
 	case clk_src_crystal:
-		return dev_priv->crystal;
+		return device->crystal;
 	case clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case clk_src_hclk:
@@ -230,7 +242,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		break;
 	case clk_src_nvclk:
 		if (!(mast & 0x00100000))
-			P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
+			P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
 		switch (mast & 0x00000003) {
 		case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
 		case 0x00000001: return read_clk(dev, clk_src_dom6);
@@ -239,7 +251,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		}
 		break;
 	case clk_src_sclk:
-		P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
+		P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000080)
@@ -251,8 +263,8 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		}
 		break;
 	case clk_src_mclk:
-		P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
-		if (nv_rd32(dev, 0x004008) & 0x00000200) {
+		P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
+		if (nv_rd32(device, 0x004008) & 0x00000200) {
 			switch (mast & 0x0000c000) {
 			case 0x00000000:
 				return read_clk(dev, clk_src_crystal) >> P;
@@ -266,7 +278,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		break;
 	case clk_src_vdec:
 		P = (read_div(dev) & 0x00000700) >> 8;
-		switch (dev_priv->chipset) {
+		switch (nv_device(drm->device)->chipset) {
 		case 0x84:
 		case 0x86:
 		case 0x92:
@@ -275,7 +287,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		case 0xa0:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				if (dev_priv->chipset == 0xa0) /* wtf?? */
+				if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
 					return read_clk(dev, clk_src_nvclk) >> P;
 				return read_clk(dev, clk_src_crystal) >> P;
 			case 0x00000400:
@@ -303,7 +315,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		}
 		break;
 	case clk_src_dom6:
-		switch (dev_priv->chipset) {
+		switch (nv_device(drm->device)->chipset) {
 		case 0x50:
 		case 0xa0:
 			return read_pll(dev, 0x00e810) >> 2;
@@ -329,22 +341,22 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		break;
 	}
 
-	NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
+	NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
 	return 0;
 }
 
 int
 nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	if (dev_priv->chipset == 0xaa ||
-	    dev_priv->chipset == 0xac)
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	if (nv_device(drm->device)->chipset == 0xaa ||
+	    nv_device(drm->device)->chipset == 0xac)
 		return 0;
 
 	perflvl->core   = read_clk(dev, clk_src_nvclk);
 	perflvl->shader = read_clk(dev, clk_src_sclk);
 	perflvl->memory = read_clk(dev, clk_src_mclk);
-	if (dev_priv->chipset != 0x50) {
+	if (nv_device(drm->device)->chipset != 0x50) {
 		perflvl->vdec = read_clk(dev, clk_src_vdec);
 		perflvl->dom6 = read_clk(dev, clk_src_dom6);
 	}
@@ -363,22 +375,25 @@ struct nv50_pm_state {
 };
 
 static u32
-calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
 	 u32 clk, int *N1, int *M1, int *log2P)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
 	struct nouveau_pll_vals coef;
 	int ret;
 
-	ret = get_pll_limits(dev, reg, pll);
+	ret = nvbios_pll_parse(bios, reg, pll);
 	if (ret)
 		return 0;
 
-	pll->vco2.maxfreq = 0;
+	pll->vco2.max_freq = 0;
 	pll->refclk = read_pll_ref(dev, reg);
 	if (!pll->refclk)
 		return 0;
 
-	ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+	ret = pclk->pll_calc(pclk, pll, clk, &coef);
 	if (ret == 0)
 		return 0;
 
@@ -461,27 +476,29 @@ mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
 static u32
 mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	if (mr <= 1)
-		return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
+		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
 	if (mr <= 3)
-		return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
+		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
 	return 0;
 }
 
 static void
 mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
 {
-	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	struct nv50_pm_state *info = exec->priv;
 	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
 
 	if (mr <= 1) {
-		if (dev_priv->vram_rank_B)
+		if (pfb->ram.ranks > 1)
 			hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
 		hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
 	} else
 	if (mr <= 3) {
-		if (dev_priv->vram_rank_B)
+		if (pfb->ram.ranks > 1)
 			hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
 		hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
 	}
@@ -490,11 +507,12 @@ mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
 static void
 mclk_clock_set(struct nouveau_mem_exec_func *exec)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nv50_pm_state *info = exec->priv;
 	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	u32 ctrl = nv_rd32(exec->dev, 0x004008);
+	u32 ctrl = nv_rd32(device, 0x004008);
 
-	info->mmast = nv_rd32(exec->dev, 0x00c040);
+	info->mmast = nv_rd32(device, 0x00c040);
 	info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
 	info->mmast |=  0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
 
@@ -508,7 +526,7 @@ mclk_clock_set(struct nouveau_mem_exec_func *exec)
 static void
 mclk_timing_set(struct nouveau_mem_exec_func *exec)
 {
-	struct drm_device *dev = exec->dev;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nv50_pm_state *info = exec->priv;
 	struct nouveau_pm_level *perflvl = info->perflvl;
 	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
@@ -516,7 +534,7 @@ mclk_timing_set(struct nouveau_mem_exec_func *exec)
 
 	for (i = 0; i < 9; i++) {
 		u32 reg = 0x100220 + (i * 4);
-		u32 val = nv_rd32(dev, reg);
+		u32 val = nv_rd32(device, reg);
 		if (val != perflvl->timing.reg[i])
 			hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
 	}
@@ -526,7 +544,8 @@ static int
 calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 	  struct nv50_pm_state *info)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
 	u32 crtc_mask = nv50_display_active_crtcs(dev);
 	struct nouveau_mem_exec_func exec = {
 		.dev = dev,
@@ -542,22 +561,22 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 		.priv = info
 	};
 	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	struct pll_lims pll;
+	struct nvbios_pll pll;
 	int N, M, P;
 	int ret;
 
 	/* use pcie refclock if possible, otherwise use mpll */
-	info->mctrl  = nv_rd32(dev, 0x004008);
+	info->mctrl  = nv_rd32(device, 0x004008);
 	info->mctrl &= ~0x81ff0200;
 	if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
-		info->mctrl |= 0x00000200 | (pll.log2p_bias << 19);
+		info->mctrl |= 0x00000200 | (pll.bias_p << 19);
 	} else {
 		ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
 		if (ret == 0)
 			return -EINVAL;
 
 		info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
-		info->mctrl |= pll.log2p_bias << 19;
+		info->mctrl |= pll.bias_p << 19;
 		info->mcoef  = (N << 8) | M;
 	}
 
@@ -567,7 +586,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 		hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
 		hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
 	}
-	if (dev_priv->chipset >= 0x92)
+	if (nv_device(drm->device)->chipset >= 0x92)
 		hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
 	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
 	hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
@@ -578,7 +597,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 
 	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
 	hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
-	if (dev_priv->chipset >= 0x92)
+	if (nv_device(drm->device)->chipset >= 0x92)
 		hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
 	hwsq_fini(hwsq);
 	return 0;
@@ -587,16 +606,17 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 void *
 nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_pm_state *info;
 	struct hwsq_ucode *hwsq;
-	struct pll_lims pll;
+	struct nvbios_pll pll;
 	u32 out, mast, divs, ctrl;
 	int clk, ret = -EINVAL;
 	int N, M, P1, P2;
 
-	if (dev_priv->chipset == 0xaa ||
-	    dev_priv->chipset == 0xac)
+	if (nv_device(drm->device)->chipset == 0xaa ||
+	    nv_device(drm->device)->chipset == 0xac)
 		return ERR_PTR(-ENODEV);
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -645,7 +665,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 		clk = calc_div(perflvl->core, perflvl->vdec, &P1);
 
 		/* see how close we can get using xpll/hclk as a source */
-		if (dev_priv->chipset != 0x98)
+		if (nv_device(drm->device)->chipset != 0x98)
 			out = read_pll(dev, 0x004030);
 		else
 			out = read_clk(dev, clk_src_hclkm3d2);
@@ -654,7 +674,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 		/* select whichever gets us closest */
 		if (abs((int)perflvl->vdec - clk) <=
 		    abs((int)perflvl->vdec - out)) {
-			if (dev_priv->chipset != 0x98)
+			if (nv_device(drm->device)->chipset != 0x98)
 				mast |= 0x00000c00;
 			divs |= P1 << 8;
 		} else {
@@ -682,7 +702,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	}
 
 	/* vdec/dom6: complete switch to new clocks */
-	switch (dev_priv->chipset) {
+	switch (nv_device(drm->device)->chipset) {
 	case 0x92:
 	case 0x94:
 	case 0x96:
@@ -698,7 +718,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	/* core/shader: make sure sclk/nvclk are disconnected from their
 	 * PLLs (nvclk to dom6, sclk to hclk)
 	 */
-	if (dev_priv->chipset < 0x92)
+	if (nv_device(drm->device)->chipset < 0x92)
 		mast = (mast & ~0x001000b0) | 0x00100080;
 	else
 		mast = (mast & ~0x000000b3) | 0x00000081;
@@ -710,7 +730,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	if (clk == 0)
 		goto error;
 
-	ctrl  = nv_rd32(dev, 0x004028) & ~0xc03f0100;
+	ctrl  = nv_rd32(device, 0x004028) & ~0xc03f0100;
 	mast &= ~0x00100000;
 	mast |= 3;
 
@@ -723,7 +743,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	 * cases will be handled by tying to nvclk, but it's possible there's
 	 * corners
 	 */
-	ctrl = nv_rd32(dev, 0x004020) & ~0xc03f0100;
+	ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
 
 	if (P1-- && perflvl->shader == (perflvl->core << 1)) {
 		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
@@ -752,11 +772,12 @@ error:
 static int
 prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 hwsq_data, hwsq_kick;
 	int i;
 
-	if (dev_priv->chipset < 0x94) {
+	if (nv_device(drm->device)->chipset < 0x94) {
 		hwsq_data = 0x001400;
 		hwsq_kick = 0x00000003;
 	} else {
@@ -764,22 +785,22 @@ prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
 		hwsq_kick = 0x00000001;
 	}
 	/* upload hwsq ucode */
-	nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(dev, 0x001304, 0x00000000);
-	if (dev_priv->chipset >= 0x92)
-		nv_wr32(dev, 0x001318, 0x00000000);
+	nv_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nv_wr32(device, 0x001304, 0x00000000);
+	if (nv_device(drm->device)->chipset >= 0x92)
+		nv_wr32(device, 0x001318, 0x00000000);
 	for (i = 0; i < hwsq->len / 4; i++)
-		nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
-	nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
+		nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
+	nv_mask(device, 0x001098, 0x00000018, 0x00000018);
 
 	/* launch, and wait for completion */
-	nv_wr32(dev, 0x00130c, hwsq_kick);
-	if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
-		NV_ERROR(dev, "hwsq ucode exec timed out\n");
-		NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
+	nv_wr32(device, 0x00130c, hwsq_kick);
+	if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
+		NV_ERROR(drm, "hwsq ucode exec timed out\n");
+		NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
 		for (i = 0; i < hwsq->len / 4; i++) {
-			NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
-				 nv_rd32(dev, 0x001400 + (i * 4)));
+			NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
+				 nv_rd32(device, 0x001400 + (i * 4)));
 		}
 
 		return -EIO;
@@ -791,20 +812,22 @@ prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
 int
 nv50_pm_clocks_set(struct drm_device *dev, void *data)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nv50_pm_state *info = data;
 	struct bit_entry M;
 	int ret = -EBUSY;
 
 	/* halt and idle execution engines */
-	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
-	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
+	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
+	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
 		goto resume;
-	if (!nv_wait(dev, 0x00251c, 0x0000003f, 0x0000003f))
+	if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
 		goto resume;
 
 	/* program memory clock, if necessary - must come before engine clock
 	 * reprogramming due to how we construct the hwsq scripts in pre()
 	 */
+#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
 	if (info->mclk_hwsq.len) {
 		/* execute some scripts that do ??? from the vbios.. */
 		if (!bit_table(dev, 'M', &M) && M.version == 1) {
@@ -826,61 +849,7 @@ nv50_pm_clocks_set(struct drm_device *dev, void *data)
 	ret = prog_hwsq(dev, &info->eclk_hwsq);
 
 resume:
-	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
 	kfree(info);
 	return ret;
 }
-
-static int
-pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
-{
-	if (*line == 0x04) {
-		*ctrl = 0x00e100;
-		*line = 4;
-		*indx = 0;
-	} else
-	if (*line == 0x09) {
-		*ctrl = 0x00e100;
-		*line = 9;
-		*indx = 1;
-	} else
-	if (*line == 0x10) {
-		*ctrl = 0x00e28c;
-		*line = 0;
-		*indx = 0;
-	} else {
-		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-int
-nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
-{
-	int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
-	if (ret)
-		return ret;
-
-	if (nv_rd32(dev, ctrl) & (1 << line)) {
-		*divs = nv_rd32(dev, 0x00e114 + (id * 8));
-		*duty = nv_rd32(dev, 0x00e118 + (id * 8));
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-int
-nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
-{
-	int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
-	if (ret)
-		return ret;
-
-	nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
-	nv_wr32(dev, 0x00e114 + (id * 8), divs);
-	nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
-	return 0;
-}
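
Almost every hunk in nv50_pm.c above follows the same conversion recipe: the dev_priv/dev pair is replaced by nouveau_device and nouveau_drm handles, register accessors take the device object, chipset checks go through nv_device(drm->device), and the log macros take the drm handle. Sketched on a dummy function for reference (neither version is code from the patch):

    /* Before: old-style access through drm_nouveau_private. */
    static u32 read_something_old(struct drm_device *dev)
    {
    	struct drm_nouveau_private *dev_priv = dev->dev_private;

    	if (dev_priv->chipset < 0x92)
    		NV_ERROR(dev, "unsupported chipset\n");
    	return nv_rd32(dev, 0x004028);
    }

    /* After: the pattern used by the converted code in this series. */
    static u32 read_something_new(struct drm_device *dev)
    {
    	struct nouveau_device *device = nouveau_dev(dev);
    	struct nouveau_drm *drm = nouveau_drm(dev);

    	if (nv_device(drm->device)->chipset < 0x92)
    		NV_ERROR(drm, "unsupported chipset\n");
    	return nv_rd32(device, 0x004028);
    }
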
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
deleted file mode 100644
index 5497a6ce25b4..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_software.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
-
-#include "nv50_display.h"
-
-struct nv50_software_priv {
-	struct nouveau_software_priv base;
-};
-
-struct nv50_software_chan {
-	struct nouveau_software_chan base;
-};
-
-static int
-mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	struct nouveau_gpuobj *gpuobj;
-
-	gpuobj = nouveau_ramht_find(chan, data);
-	if (!gpuobj)
-		return -ENOENT;
-
-	pch->base.vblank.ctxdma = gpuobj->cinst >> 4;
-	return 0;
-}
-
-static int
-mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	pch->base.vblank.offset = data;
-	return 0;
-}
-
-static int
-mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	pch->base.vblank.value = data;
-	return 0;
-}
-
-static int
-mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
-	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	struct drm_device *dev = chan->dev;
-
-	if (data > 1)
-		return -EINVAL;
-
-	drm_vblank_get(dev, data);
-
-	pch->base.vblank.head = data;
-	list_add(&pch->base.vblank.list, &psw->base.vblank);
-	return 0;
-}
-
-static int
-mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	nouveau_finish_page_flip(chan, NULL);
-	return 0;
-}
-
-static int
-nv50_software_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
-	struct nv50_display *pdisp = nv50_display(chan->dev);
-	struct nv50_software_chan *pch;
-	int ret = 0, i;
-
-	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
-	if (!pch)
-		return -ENOMEM;
-
-	nouveau_software_context_new(&pch->base);
-	pch->base.vblank.channel = chan->ramin->vinst >> 12;
-	chan->engctx[engine] = pch;
-
-	/* dma objects for display sync channel semaphore blocks */
-	for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
-		struct nv50_display_crtc *dispc = &pdisp->crtc[i];
-		struct nouveau_gpuobj *obj = NULL;
-
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     dispc->sem.bo->bo.offset, 0x1000,
-					     NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &obj);
-		if (ret)
-			break;
-
-		ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
-	}
-
-	if (ret)
-		psw->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nv50_software_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv50_software_chan *pch = chan->engctx[engine];
-	chan->engctx[engine] = NULL;
-	kfree(pch);
-}
-
-static int
-nv50_software_object_new(struct nouveau_channel *chan, int engine,
-			 u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 0;
-	obj->class  = class;
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static int
-nv50_software_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
-static int
-nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static void
-nv50_software_destroy(struct drm_device *dev, int engine)
-{
-	struct nv50_software_priv *psw = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, SW);
-	kfree(psw);
-}
-
-int
-nv50_software_create(struct drm_device *dev)
-{
-	struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
-	if (!psw)
-		return -ENOMEM;
-
-	psw->base.base.destroy = nv50_software_destroy;
-	psw->base.base.init = nv50_software_init;
-	psw->base.base.fini = nv50_software_fini;
-	psw->base.base.context_new = nv50_software_context_new;
-	psw->base.base.context_del = nv50_software_context_del;
-	psw->base.base.object_new = nv50_software_object_new;
-	nouveau_software_create(&psw->base);
-
-	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
-	NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
-	NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
-	NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
-	return 0;
-}
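
The deleted NV50 software engine above implements the class 0x506e vblank-semaphore methods: 0x018c binds a DMA object, 0x0400/0x0404 store an offset and value, and 0x0408 picks a head, takes a vblank reference and queues the request. The state those methods accumulate, summarized as a struct whose field names mirror the deleted pch->base.vblank usage (descriptive only, not part of the patch):

    /* Per-channel vblank-semaphore state filled in by the methods above. */
    struct nv50_vblsem_sketch {
    	u32 channel;	/* chan->ramin->vinst >> 12, set in context_new() */
    	u32 ctxdma;	/* mthd 0x018c: gpuobj->cinst >> 4 */
    	u32 offset;	/* mthd 0x0400 */
    	u32 value;	/* mthd 0x0404 */
    	u32 head;	/* mthd 0x0408; also passed to drm_vblank_get() */
    };
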
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 63ece8503a11..b562b59e1326 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -29,35 +29,40 @@
 
 #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
 #include "nouveau_reg.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
 #include "nv50_display.h"
 
+#include <subdev/timer.h>
+
 static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
+nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
 	static const u8 nv50[] = { 16, 8, 0, 24 };
-	if (dev_priv->chipset == 0xaf)
+	if (nv_device(drm->device)->chipset == 0xaf)
 		return nvaf[lane];
 	return nv50[lane];
 }
 
 static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
+nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
+	nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
 }
 
 static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
+nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
 		      u8 lane, u8 swing, u8 preem)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
 	u32 mask = 0x000000ff << shift;
@@ -65,7 +70,7 @@ nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
 
 	table = nouveau_dp_bios_data(dev, dcb, &entry);
 	if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
 		return;
 	}
 
@@ -76,24 +81,26 @@ nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
 			return;
 	}
 
-	nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
-	nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
-	nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
+	nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
+	nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
+	nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
 }
 
 static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
+nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
 		     int link_nr, u32 link_bw, bool enhframe)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
-	u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)) & ~0x000c0000;
+	u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
+	u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
 	u8 *table, *entry, mask;
 	int i;
 
 	table = nouveau_dp_bios_data(dev, dcb, &entry);
 	if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
 		return;
 	}
 
@@ -112,20 +119,21 @@ nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
 	if (link_bw > 162000)
 		clksor |= 0x00040000;
 
-	nv_wr32(dev, 0x614300 + (or * 0x800), clksor);
-	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), dpctrl);
+	nv_wr32(device, 0x614300 + (or * 0x800), clksor);
+	nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
 
 	mask = 0;
 	for (i = 0; i < link_nr; i++)
 		mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
-	nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
+	nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
 }
 
 static void
 nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
 {
-	u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
-	u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800));
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
+	u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
 	if (clksor & 0x000c0000)
 		*bw = 270000;
 	else
@@ -139,6 +147,8 @@ nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
 void
 nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	const u32 symbol = 100000;
 	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
 	int TU, VTUi, VTUf, VTUa;
@@ -206,7 +216,7 @@ nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
 	}
 
 	if (!bestTU) {
-		NV_ERROR(dev, "DP: unable to find suitable config\n");
+		NV_ERROR(drm, "DP: unable to find suitable config\n");
 		return;
 	}
 
@@ -217,8 +227,8 @@ nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
 	r = do_div(unk, symbol);
 	unk += 6;
 
-	nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
-	nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
+	nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
+	nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
 							     bestVTUf << 16 |
 							     bestVTUi << 8 |
 							     unk);
@@ -227,6 +237,7 @@ static void
 nv50_sor_disconnect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
@@ -235,11 +246,11 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
 		return;
 	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
 
-	NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
+	NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
 
 	ret = RING_SPACE(evo, 4);
 	if (ret) {
-		NV_ERROR(dev, "no space while disconnecting SOR\n");
+		NV_ERROR(drm, "no space while disconnecting SOR\n");
 		return;
 	}
 	BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
@@ -256,22 +267,24 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
 static void
 nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 {
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_encoder *enc;
 	uint32_t val;
 	int or = nv_encoder->or;
 
-	NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
+	NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
 
 	nv_encoder->last_dpms = mode;
 	list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
 		struct nouveau_encoder *nvenc = nouveau_encoder(enc);
 
 		if (nvenc == nv_encoder ||
-		    (nvenc->dcb->type != OUTPUT_TMDS &&
-		     nvenc->dcb->type != OUTPUT_LVDS &&
-		     nvenc->dcb->type != OUTPUT_DP) ||
+		    (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
+		     nvenc->dcb->type != DCB_OUTPUT_LVDS &&
+		     nvenc->dcb->type != DCB_OUTPUT_DP) ||
 		    nvenc->dcb->or != nv_encoder->dcb->or)
 			continue;
 
@@ -280,30 +293,30 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 	}
 
 	/* wait for it to be done */
-	if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+	if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
 		     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
-		NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
-		NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
-			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
+		NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
+		NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
+			 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
 	}
 
-	val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
+	val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
 
 	if (mode == DRM_MODE_DPMS_ON)
 		val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
 	else
 		val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
 
-	nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
+	nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
 		NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
-	if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or),
+	if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
 		     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-		NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
-		NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
-			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
+		NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
+		NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
+			 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
 	}
 
-	if (nv_encoder->dcb->type == OUTPUT_DP) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		struct dp_train_func func = {
 			.link_set = nv50_sor_dp_link_set,
 			.train_set = nv50_sor_dp_train_set,
@@ -317,13 +330,15 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 static void
 nv50_sor_save(struct drm_encoder *encoder)
 {
-	NV_ERROR(encoder->dev, "!!\n");
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	NV_ERROR(drm, "!!\n");
 }
 
 static void
 nv50_sor_restore(struct drm_encoder *encoder)
 {
-	NV_ERROR(encoder->dev, "!!\n");
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	NV_ERROR(drm, "!!\n");
 }
 
 static bool
@@ -331,14 +346,15 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder,
 		    const struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_connector *connector;
 
-	NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
+	NV_DEBUG(drm, "or %d\n", nv_encoder->or);
 
 	connector = nouveau_encoder_connector_get(nv_encoder);
 	if (!connector) {
-		NV_ERROR(encoder->dev, "Encoder has no connector\n");
+		NV_ERROR(drm, "Encoder has no connector\n");
 		return false;
 	}
 
@@ -354,7 +370,7 @@ nv50_sor_prepare(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	nv50_sor_disconnect(encoder);
-	if (nv_encoder->dcb->type == OUTPUT_DP) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		/* avoid race between link training and supervisor intr */
 		nv50_display_sync(encoder->dev);
 	}
@@ -371,18 +387,18 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 {
 	struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
 	uint32_t mode_ctl = 0;
 	int ret;
 
-	NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
+	NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
 		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
 	nv_encoder->crtc = encoder->crtc;
 
 	switch (nv_encoder->dcb->type) {
-	case OUTPUT_TMDS:
+	case DCB_OUTPUT_TMDS:
 		if (nv_encoder->dcb->sorconf.link & 1) {
 			if (mode->clock < 165000)
 				mode_ctl = 0x0100;
@@ -393,7 +409,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 		nouveau_hdmi_mode_set(encoder, mode);
 		break;
-	case OUTPUT_DP:
+	case DCB_OUTPUT_DP:
 		nv_connector = nouveau_encoder_connector_get(nv_encoder);
 		if (nv_connector && nv_connector->base.display_info.bpc == 6) {
 			nv_encoder->dp.datarate = mode->clock * 18 / 8;
@@ -427,7 +443,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 	ret = RING_SPACE(evo, 2);
 	if (ret) {
-		NV_ERROR(dev, "no space while connecting SOR\n");
+		NV_ERROR(drm, "no space while connecting SOR\n");
 		nv_encoder->crtc = NULL;
 		return;
 	}
@@ -458,11 +474,9 @@ static void
 nv50_sor_destroy(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 
-	if (!encoder)
-		return;
-
-	NV_DEBUG_KMS(encoder->dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	drm_encoder_cleanup(encoder);
 
@@ -474,21 +488,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
 };
 
 int
-nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
 {
 	struct nouveau_encoder *nv_encoder = NULL;
 	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_encoder *encoder;
 	int type;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	switch (entry->type) {
-	case OUTPUT_TMDS:
-	case OUTPUT_DP:
+	case DCB_OUTPUT_TMDS:
+	case DCB_OUTPUT_DP:
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		type = DRM_MODE_ENCODER_LVDS;
 		break;
 	default:
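
In the DP paths above, the clksor bit 0x00040000 selects the 270 MHz symbol rate (versus 162 MHz), and nv50_sor_mode_set() computes dp.datarate as mode->clock * bpp / 8. As a rule of thumb, each lane carries roughly one payload byte per symbol after 8b/10b encoding, so a back-of-the-envelope fit check looks like the hypothetical helper below (not part of the patch):

    /* Hypothetical feasibility check: does a mode fit an nr-lane DP link?
     * link_khz is the per-lane symbol rate (162000 or 270000). */
    static bool dp_link_fits(u32 dotclock_khz, u32 bpp, int lanes, u32 link_khz)
    {
    	u64 need = (u64)dotclock_khz * bpp / 8;	/* kB/s, same units as dp.datarate */
    	u64 have = (u64)link_khz * lanes;	/* ~1 payload byte/symbol/lane */

    	return have >= need;
    }
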
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
deleted file mode 100644
index e2a1af7b9eef..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-
-static int types[0x80] = {
-	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
-	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
-	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
-	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
-};
-
-bool
-nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
-{
-	int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
-
-	if (likely(type < ARRAY_SIZE(types) && types[type]))
-		return true;
-	return false;
-}
-
-void
-nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
-	struct nouveau_mm_node *this;
-	struct nouveau_mem *mem;
-
-	mem = *pmem;
-	*pmem = NULL;
-	if (unlikely(mem == NULL))
-		return;
-
-	mutex_lock(&mm->mutex);
-	while (!list_empty(&mem->regions)) {
-		this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
-
-		list_del(&this->rl_entry);
-		nouveau_mm_put(mm, this);
-	}
-
-	if (mem->tag) {
-		drm_mm_put_block(mem->tag);
-		mem->tag = NULL;
-	}
-	mutex_unlock(&mm->mutex);
-
-	kfree(mem);
-}
-
-int
-nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
-	      u32 memtype, struct nouveau_mem **pmem)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
-	struct nouveau_mm_node *r;
-	struct nouveau_mem *mem;
-	int comp = (memtype & 0x300) >> 8;
-	int type = (memtype & 0x07f);
-	int ret;
-
-	if (!types[type])
-		return -EINVAL;
-	size >>= 12;
-	align >>= 12;
-	size_nc >>= 12;
-
-	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-
-	mutex_lock(&mm->mutex);
-	if (comp) {
-		if (align == 16) {
-			struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-			int n = (size >> 4) * comp;
-
-			mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
-			if (mem->tag)
-				mem->tag = drm_mm_get_block(mem->tag, n, 0);
-		}
-
-		if (unlikely(!mem->tag))
-			comp = 0;
-	}
-
-	INIT_LIST_HEAD(&mem->regions);
-	mem->dev = dev_priv->dev;
-	mem->memtype = (comp << 7) | type;
-	mem->size = size;
-
-	do {
-		ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
-		if (ret) {
-			mutex_unlock(&mm->mutex);
-			nv50_vram_del(dev, &mem);
-			return ret;
-		}
-
-		list_add_tail(&r->rl_entry, &mem->regions);
-		size -= r->length;
-	} while (size);
-	mutex_unlock(&mm->mutex);
-
-	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
-	*pmem = mem;
-	return 0;
-}
-
-static u32
-nv50_vram_rblock(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, parts, colbits, rowbitsa, rowbitsb, banks;
-	u64 rowsize, predicted;
-	u32 r0, r4, rt, ru, rblock_size;
-
-	r0 = nv_rd32(dev, 0x100200);
-	r4 = nv_rd32(dev, 0x100204);
-	rt = nv_rd32(dev, 0x100250);
-	ru = nv_rd32(dev, 0x001540);
-	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
-	for (i = 0, parts = 0; i < 8; i++) {
-		if (ru & (0x00010000 << i))
-			parts++;
-	}
-
-	colbits  =  (r4 & 0x0000f000) >> 12;
-	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
-	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
-	rowsize = parts * banks * (1 << colbits) * 8;
-	predicted = rowsize << rowbitsa;
-	if (r0 & 0x00000004)
-		predicted += rowsize << rowbitsb;
-
-	if (predicted != dev_priv->vram_size) {
-		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
-			(u32)(dev_priv->vram_size >> 20));
-		NV_WARN(dev, "we calculated %dMiB VRAM\n",
-			(u32)(predicted >> 20));
-	}
-
-	rblock_size = rowsize;
-	if (rt & 1)
-		rblock_size *= 3;
-
-	NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
-	return rblock_size;
-}
-
-int
-nv50_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 pfb714 = nv_rd32(dev, 0x100714);
-	u32 rblock, length;
-
-	switch (pfb714 & 0x00000007) {
-	case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
-	case 1:
-		if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3)
-			dev_priv->vram_type = NV_MEM_TYPE_DDR3;
-		else
-			dev_priv->vram_type = NV_MEM_TYPE_DDR2;
-		break;
-	case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
-	case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break;
-	case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break;
-	default:
-		break;
-	}
-
-	dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4);
-	dev_priv->vram_size  = nv_rd32(dev, 0x10020c);
-	dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
-	dev_priv->vram_size &= 0xffffffff00ULL;
-
-	/* IGPs, no funky reordering happens here, they don't have VRAM */
-	if (dev_priv->chipset == 0xaa ||
-	    dev_priv->chipset == 0xac ||
-	    dev_priv->chipset == 0xaf) {
-		dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
-		rblock = 4096 >> 12;
-	} else {
-		rblock = nv50_vram_rblock(dev) >> 12;
-	}
-
-	length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
-
-	return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
-}
-
-void
-nv50_vram_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-
-	nouveau_mm_fini(&vram->mm);
-}
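
The deleted nv50_vram_rblock() derives the memory row size as partitions * banks * 2^colbits * 8 bytes and sanity-checks (rowsize << rowbitsa) against the VRAM size read from 0x10020c (whose low byte, per the code, supplies the bits above 32). A worked example with made-up register decodes:

    /* Worked example of the nv50_vram_rblock() arithmetic, using made-up
     * decodes: 8 partitions, 4 banks, colbits = 10, rowbitsa = 13, one rank. */
    static u64 rblock_example(void)
    {
    	u64 parts = 8, banks = 4, colbits = 10, rowbitsa = 13;
    	u64 rowsize   = parts * banks * (1ULL << colbits) * 8;	/* 262144 B (256 KiB) */
    	u64 predicted = rowsize << rowbitsa;			/* 2 GiB expected     */

    	(void)predicted;	/* compared against dev_priv->vram_size in the real code */
    	return rowsize;		/* rblock_size; the code triples this when rt & 1 */
    }
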
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
deleted file mode 100644
index a4f4d4a0a755..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_bsp.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
- *     more than just an enable/disable stub this needs to be split out to
- *     nv98_bsp.c...
- */
-
-struct nv84_bsp_engine {
-	struct nouveau_exec_engine base;
-};
-
-static int
-nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	if (!(nv_rd32(dev, 0x000200) & 0x00008000))
-		return 0;
-
-	nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
-	return 0;
-}
-
-static int
-nv84_bsp_init(struct drm_device *dev, int engine)
-{
-	nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
-	return 0;
-}
-
-static void
-nv84_bsp_destroy(struct drm_device *dev, int engine)
-{
-	struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, BSP);
-
-	kfree(pbsp);
-}
-
-int
-nv84_bsp_create(struct drm_device *dev)
-{
-	struct nv84_bsp_engine *pbsp;
-
-	pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
-	if (!pbsp)
-		return -ENOMEM;
-
-	pbsp->base.destroy = nv84_bsp_destroy;
-	pbsp->base.init = nv84_bsp_init;
-	pbsp->base.fini = nv84_bsp_fini;
-
-	NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
-	return 0;
-}
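
nv84_bsp_init() above (and nv84_crypt_init() below) resets its unit by pulsing the engine's enable bit in the register at 0x000200, assumed here to be the PMC master-enable. The idiom, pulled out as an illustrative helper:

    /* Illustrative only: pulse a unit's enable bit low then high to reset it. */
    static void pmc_unit_reset_sketch(struct drm_device *dev, u32 unit_bit)
    {
    	nv_mask(dev, 0x000200, unit_bit, 0x00000000);	/* hold the unit in reset */
    	nv_mask(dev, 0x000200, unit_bit, unit_bit);	/* release / re-enable */
    }
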
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
deleted file mode 100644
index dc2bc5cc536d..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-struct nv84_crypt_engine {
-	struct nouveau_exec_engine base;
-};
-
-static int
-nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_gpuobj *ctx;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &ctx);
-	if (ret)
-		return ret;
-
-	nv_wo32(ramin, 0xa0, 0x00190000);
-	nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
-	nv_wo32(ramin, 0xa8, ctx->vinst);
-	nv_wo32(ramin, 0xac, 0);
-	nv_wo32(ramin, 0xb0, 0);
-	nv_wo32(ramin, 0xb4, 0);
-	dev_priv->engine.instmem.flush(dev);
-
-	atomic_inc(&chan->vm->engref[engine]);
-	chan->engctx[engine] = ctx;
-	return 0;
-}
-
-static void
-nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	u32 inst;
-
-	inst  = (chan->ramin->vinst >> 12);
-	inst |= 0x80000000;
-
-	/* mark context as invalid if still on the hardware, not
-	 * doing this causes issues the next time PCRYPT is used,
-	 * unsurprisingly :)
-	 */
-	nv_wr32(dev, 0x10200c, 0x00000000);
-	if (nv_rd32(dev, 0x102188) == inst)
-		nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
-	if (nv_rd32(dev, 0x10218c) == inst)
-		nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
-	nv_wr32(dev, 0x10200c, 0x00000010);
-
-	nouveau_gpuobj_ref(NULL, &ctx);
-
-	atomic_dec(&chan->vm->engref[engine]);
-	chan->engctx[engine] = NULL;
-}
-
-static int
-nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
-	if (ret)
-		return ret;
-	obj->engine = 5;
-	obj->class  = class;
-
-	nv_wo32(obj, 0x00, class);
-	dev_priv->engine.instmem.flush(dev);
-
-	ret = nouveau_ramht_insert(chan, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
-}
-
-static void
-nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 0x0a);
-}
-
-static struct nouveau_bitfield nv84_crypt_intr[] = {
-	{ 0x00000001, "INVALID_STATE" },
-	{ 0x00000002, "ILLEGAL_MTHD" },
-	{ 0x00000004, "ILLEGAL_CLASS" },
-	{ 0x00000080, "QUERY" },
-	{ 0x00000100, "FAULT" },
-	{}
-};
-
-static void
-nv84_crypt_isr(struct drm_device *dev)
-{
-	u32 stat = nv_rd32(dev, 0x102130);
-	u32 mthd = nv_rd32(dev, 0x102190);
-	u32 data = nv_rd32(dev, 0x102194);
-	u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
-	int show = nouveau_ratelimit();
-	int chid = nv50_graph_isr_chid(dev, inst);
-
-	if (show) {
-		NV_INFO(dev, "PCRYPT:");
-		nouveau_bitfield_print(nv84_crypt_intr, stat);
-		printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
-			chid, inst, mthd, data);
-	}
-
-	nv_wr32(dev, 0x102130, stat);
-	nv_wr32(dev, 0x10200c, 0x10);
-
-	nv50_fb_vm_trap(dev, show);
-}
-
-static int
-nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_wr32(dev, 0x102140, 0x00000000);
-	return 0;
-}
-
-static int
-nv84_crypt_init(struct drm_device *dev, int engine)
-{
-	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
-
-	nv_wr32(dev, 0x102130, 0xffffffff);
-	nv_wr32(dev, 0x102140, 0xffffffbf);
-
-	nv_wr32(dev, 0x10200c, 0x00000010);
-	return 0;
-}
-
-static void
-nv84_crypt_destroy(struct drm_device *dev, int engine)
-{
-	struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, CRYPT);
-
-	nouveau_irq_unregister(dev, 14);
-	kfree(pcrypt);
-}
-
-int
-nv84_crypt_create(struct drm_device *dev)
-{
-	struct nv84_crypt_engine *pcrypt;
-
-	pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
-	if (!pcrypt)
-		return -ENOMEM;
-
-	pcrypt->base.destroy = nv84_crypt_destroy;
-	pcrypt->base.init = nv84_crypt_init;
-	pcrypt->base.fini = nv84_crypt_fini;
-	pcrypt->base.context_new = nv84_crypt_context_new;
-	pcrypt->base.context_del = nv84_crypt_context_del;
-	pcrypt->base.object_new = nv84_crypt_object_new;
-	pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
-
-	nouveau_irq_register(dev, 14, nv84_crypt_isr);
-
-	NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
-	NVOBJ_CLASS (dev, 0x74c1, CRYPT);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 60dd73d532e7..c686650584b6 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,13 +22,17 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include <core/object.h>
+#include <core/class.h>
+
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fence.h"
 
+#include "nv50_display.h"
+
 struct nv84_fence_chan {
 	struct nouveau_fence_chan base;
 };
@@ -42,13 +46,14 @@ static int
 nv84_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
 		OUT_RING  (chan, NvSema);
 		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(chan->id * 16));
-		OUT_RING  (chan, lower_32_bits(chan->id * 16));
+		OUT_RING  (chan, upper_32_bits(fifo->chid * 16));
+		OUT_RING  (chan, lower_32_bits(fifo->chid * 16));
 		OUT_RING  (chan, fence->sequence);
 		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
 		FIRE_RING (chan);
@@ -61,13 +66,14 @@ static int
 nv84_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
+	struct nouveau_fifo_chan *fifo = (void *)prev->object;
 	int ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
 		OUT_RING  (chan, NvSema);
 		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(prev->id * 16));
-		OUT_RING  (chan, lower_32_bits(prev->id * 16));
+		OUT_RING  (chan, upper_32_bits(fifo->chid * 16));
+		OUT_RING  (chan, lower_32_bits(fifo->chid * 16));
 		OUT_RING  (chan, fence->sequence);
 		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
 		FIRE_RING (chan);
@@ -78,100 +84,99 @@ nv84_fence_sync(struct nouveau_fence *fence,
 static u32
 nv84_fence_read(struct nouveau_channel *chan)
 {
-	struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
-	return nv_ro32(priv->mem, chan->id * 16);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nv84_fence_priv *priv = chan->drm->fence;
+	return nv_ro32(priv->mem, fifo->chid * 16);
 }
 
 static void
-nv84_fence_context_del(struct nouveau_channel *chan, int engine)
+nv84_fence_context_del(struct nouveau_channel *chan)
 {
-	struct nv84_fence_chan *fctx = chan->engctx[engine];
+	struct nv84_fence_chan *fctx = chan->fence;
 	nouveau_fence_context_del(&fctx->base);
-	chan->engctx[engine] = NULL;
+	chan->fence = NULL;
 	kfree(fctx);
 }
 
 static int
-nv84_fence_context_new(struct nouveau_channel *chan, int engine)
+nv84_fence_context_new(struct nouveau_channel *chan)
 {
-	struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct drm_device *dev = chan->drm->dev;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
-	struct nouveau_gpuobj *obj;
-	int ret;
+	struct nouveau_object *object;
+	int ret, i;
 
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (!fctx)
 		return -ENOMEM;
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-				     priv->mem->vinst, priv->mem->size,
-				     NV_MEM_ACCESS_RW,
-				     NV_MEM_TARGET_VRAM, &obj);
-	if (ret == 0) {
-		ret = nouveau_ramht_insert(chan, NvSema, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
-		nv_wo32(priv->mem, chan->id * 16, 0x00000000);
+	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+				 NvSema, 0x0002,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = priv->mem->addr,
+					.limit = priv->mem->addr +
+						 priv->mem->size - 1,
+				 }, sizeof(struct nv_dma_class),
+				 &object);
+
+	/* dma objects for display sync channel semaphore blocks */
+	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+
+		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+					 NvEvoSema0 + i, 0x003d,
+					 &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = bo->bo.offset,
+						.limit = bo->bo.offset + 0xfff,
+					 }, sizeof(struct nv_dma_class),
+					 &object);
 	}
 
 	if (ret)
-		nv84_fence_context_del(chan, engine);
+		nv84_fence_context_del(chan);
+	nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
 	return ret;
 }
 
-static int
-nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static int
-nv84_fence_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
 static void
-nv84_fence_destroy(struct drm_device *dev, int engine)
+nv84_fence_destroy(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv84_fence_priv *priv = nv_engine(dev, engine);
-
+	struct nv84_fence_priv *priv = drm->fence;
 	nouveau_gpuobj_ref(NULL, &priv->mem);
-	dev_priv->eng[engine] = NULL;
+	drm->fence = NULL;
 	kfree(priv);
 }
 
 int
-nv84_fence_create(struct drm_device *dev)
+nv84_fence_create(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
 	struct nv84_fence_priv *priv;
+	u32 chan = pfifo->max + 1;
 	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	priv->base.engine.destroy = nv84_fence_destroy;
-	priv->base.engine.init = nv84_fence_init;
-	priv->base.engine.fini = nv84_fence_fini;
-	priv->base.engine.context_new = nv84_fence_context_new;
-	priv->base.engine.context_del = nv84_fence_context_del;
+	priv->base.dtor = nv84_fence_destroy;
+	priv->base.context_new = nv84_fence_context_new;
+	priv->base.context_del = nv84_fence_context_del;
 	priv->base.emit = nv84_fence_emit;
 	priv->base.sync = nv84_fence_sync;
 	priv->base.read = nv84_fence_read;
-	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
-				 0x1000, 0, &priv->mem);
-	if (ret)
-		goto out;
 
-out:
+	ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
+				&priv->mem);
 	if (ret)
-		nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+		nv84_fence_destroy(drm);
 	return ret;
 }
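
For readers tracking the nv84_fence.c hunks above: both before and after this patch, every FIFO channel owns a 16-byte slot in one shared VRAM buffer (priv->mem), emit writes the fence sequence into the emitting channel's slot via SEMAPHORE_TRIGGER_WRITE_LONG, sync does an ACQUIRE_GEQUAL against the previous channel's slot, and read simply loads the slot back. The patch only changes where the channel index comes from (fifo->chid from the channel's FIFO object rather than chan->id) and how the NvSema DMA object covering priv->mem is built (nouveau_object_new with an nv_dma_class, plus per-CRTC objects for the display sync semaphores). The sketch below models that slot scheme on the CPU only; SLOT_STRIDE, fence_slot_write/read and fence_signalled are invented names for illustration, not nouveau API.

/* CPU-only model of the nv84 fence slots (illustration, assumed names).
 * One u32 sequence per channel, spaced 16 bytes apart, mirroring the
 * chid * 16 offsets used in the code above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SLOT_STRIDE 16                  /* bytes per channel slot */
#define MAX_CHAN    128

static uint8_t fence_mem[MAX_CHAN * SLOT_STRIDE];

/* what SEMAPHORE_TRIGGER_WRITE_LONG ends up doing for channel 'chid' */
static void fence_slot_write(int chid, uint32_t seq)
{
	memcpy(&fence_mem[chid * SLOT_STRIDE], &seq, sizeof(seq));
}

/* what nv84_fence_read() reads back for a channel */
static uint32_t fence_slot_read(int chid)
{
	uint32_t seq;
	memcpy(&seq, &fence_mem[chid * SLOT_STRIDE], sizeof(seq));
	return seq;
}

/* SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL: has 'prev_chid' reached 'seq' yet?
 * Wrap-safe comparison, since sequences are free-running counters. */
static int fence_signalled(int prev_chid, uint32_t seq)
{
	return (int32_t)(fence_slot_read(prev_chid) - seq) >= 0;
}

int main(void)
{
	fence_slot_write(3, 41);
	printf("seq 42 done? %d\n", fence_signalled(3, 42));    /* 0 */
	fence_slot_write(3, 42);
	printf("seq 42 done? %d\n", fence_signalled(3, 42));    /* 1 */
	return 0;
}
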
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
deleted file mode 100644
index 9844a65491c3..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_fifo.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-#include "nouveau_vm.h"
-
-struct nv84_fifo_priv {
-	struct nouveau_fifo_priv base;
-	struct nouveau_gpuobj *playlist[2];
-	int cur_playlist;
-};
-
-struct nv84_fifo_chan {
-	struct nouveau_fifo_chan base;
-	struct nouveau_gpuobj *ramfc;
-	struct nouveau_gpuobj *cache;
-};
-
-static int
-nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
-	struct nv84_fifo_chan *fctx;
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-        u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
-	u64 instance;
-	unsigned long flags;
-	int ret;
-
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-	atomic_inc(&chan->vm->engref[engine]);
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			     NV50_USER(chan->id), PAGE_SIZE);
-	if (!chan->user) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
-	if (ret)
-		goto error;
-
-	instance = fctx->ramfc->vinst >> 8;
-
-	ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
-	if (ret)
-		goto error;
-
-	nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
-	nv_wo32(fctx->ramfc, 0x40, 0x00000000);
-	nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
-	nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
-	nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
-	nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
-				   drm_order(chan->dma.ib_max + 1) << 16);
-	nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(fctx->ramfc, 0x78, 0x00000000);
-	nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
-	nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj->cinst >> 4));
-	nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
-	nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
-
-	nv_wo32(chan->ramin, 0x00, chan->id);
-	nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
-
-	dev_priv->engine.instmem.flush(dev);
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
-	nv50_fifo_playlist_update(dev);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
-	if (ret)
-		priv->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv84_fifo_chan *fctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-	u32 save;
-
-	/* remove channel from playlist, will context switch if active */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
-	nv50_fifo_playlist_update(dev);
-
-	save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
-
-	/* tell any engines on this channel to unload their contexts */
-	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
-	if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
-		NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
-
-	nv_wr32(dev, 0x002520, save);
-
-	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	/* clean up */
-	if (chan->user) {
-		iounmap(chan->user);
-		chan->user = NULL;
-	}
-
-	nouveau_gpuobj_ref(NULL, &fctx->ramfc);
-	nouveau_gpuobj_ref(NULL, &fctx->cache);
-
-	atomic_dec(&chan->vm->engref[engine]);
-	chan->engctx[engine] = NULL;
-	kfree(fctx);
-}
-
-static int
-nv84_fifo_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv84_fifo_chan *fctx;
-	u32 instance;
-	int i;
-
-	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(dev, 0x00250c, 0x6f3cfc34);
-	nv_wr32(dev, 0x002044, 0x01003fff);
-
-	nv_wr32(dev, 0x002100, 0xffffffff);
-	nv_wr32(dev, 0x002140, 0xffffffff);
-
-	for (i = 0; i < 128; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-		if (chan && (fctx = chan->engctx[engine]))
-			instance = 0x80000000 | fctx->ramfc->vinst >> 8;
-		else
-			instance = 0x00000000;
-		nv_wr32(dev, 0x002600 + (i * 4), instance);
-	}
-
-	nv50_fifo_playlist_update(dev);
-
-	nv_wr32(dev, 0x003200, 1);
-	nv_wr32(dev, 0x003250, 1);
-	nv_wr32(dev, 0x002500, 1);
-	return 0;
-}
-
-static int
-nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv84_fifo_priv *priv = nv_engine(dev, engine);
-	int i;
-	u32 save;
-
-	/* set playlist length to zero, fifo will unload context */
-	nv_wr32(dev, 0x0032ec, 0);
-
-	save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
-
-	/* tell all connected engines to unload their contexts */
-	for (i = 0; i < priv->base.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-		if (chan)
-			nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
-		if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
-			NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
-			return -EBUSY;
-		}
-	}
-
-	nv_wr32(dev, 0x002520, save);
-	nv_wr32(dev, 0x002140, 0);
-	return 0;
-}
-
-int
-nv84_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv84_fifo_priv *priv;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.base.destroy = nv50_fifo_destroy;
-	priv->base.base.init = nv84_fifo_init;
-	priv->base.base.fini = nv84_fifo_fini;
-	priv->base.base.context_new = nv84_fifo_context_new;
-	priv->base.base.context_del = nv84_fifo_context_del;
-	priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
-	priv->base.channels = 127;
-	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
-	if (ret)
-		goto error;
-
-	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-error:
-	if (ret)
-		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
-	return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
deleted file mode 100644
index 0dec4958eb5f..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_vp.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
- *     more than just an enable/disable stub this needs to be split out to
- *     nv98_vp.c...
- */
-
-struct nv84_vp_engine {
-	struct nouveau_exec_engine base;
-};
-
-static int
-nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	if (!(nv_rd32(dev, 0x000200) & 0x00020000))
-		return 0;
-
-	nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
-	return 0;
-}
-
-static int
-nv84_vp_init(struct drm_device *dev, int engine)
-{
-	nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
-	return 0;
-}
-
-static void
-nv84_vp_destroy(struct drm_device *dev, int engine)
-{
-	struct nv84_vp_engine *pvp = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, VP);
-
-	kfree(pvp);
-}
-
-int
-nv84_vp_create(struct drm_device *dev)
-{
-	struct nv84_vp_engine *pvp;
-
-	pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
-	if (!pvp)
-		return -ENOMEM;
-
-	pvp->base.destroy = nv84_vp_destroy;
-	pvp->base.init = nv84_vp_init;
-	pvp->base.fini = nv84_vp_fini;
-
-	NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
deleted file mode 100644
index 6f4c15345b9b..000000000000
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-#include "nv98_crypt.fuc.h"
-
-struct nv98_crypt_priv {
-	struct nouveau_exec_engine base;
-};
-
-struct nv98_crypt_chan {
-	struct nouveau_gpuobj *mem;
-};
-
-static int
-nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv98_crypt_priv *priv = nv_engine(dev, engine);
-	struct nv98_crypt_chan *cctx;
-	int ret;
-
-	cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
-	if (!cctx)
-		return -ENOMEM;
-
-	atomic_inc(&chan->vm->engref[engine]);
-
-	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
-	if (ret)
-		goto error;
-
-	nv_wo32(chan->ramin, 0xa0, 0x00190000);
-	nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
-	nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
-	nv_wo32(chan->ramin, 0xac, 0x00000000);
-	nv_wo32(chan->ramin, 0xb0, 0x00000000);
-	nv_wo32(chan->ramin, 0xb4, 0x00000000);
-	dev_priv->engine.instmem.flush(dev);
-
-error:
-	if (ret)
-		priv->base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nv98_crypt_chan *cctx = chan->engctx[engine];
-	int i;
-
-	for (i = 0xa0; i < 0xb4; i += 4)
-		nv_wo32(chan->ramin, i, 0x00000000);
-
-	nouveau_gpuobj_ref(NULL, &cctx->mem);
-
-	atomic_dec(&chan->vm->engref[engine]);
-	chan->engctx[engine] = NULL;
-	kfree(cctx);
-}
-
-static int
-nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
-		     u32 handle, u16 class)
-{
-	struct nv98_crypt_chan *cctx = chan->engctx[engine];
-
-	/* fuc engine doesn't need an object, our ramht code does.. */
-	cctx->mem->engine = 5;
-	cctx->mem->class  = class;
-	return nouveau_ramht_insert(chan, handle, cctx->mem);
-}
-
-static void
-nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 0x0a);
-}
-
-static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
-	return 0;
-}
-
-static int
-nv98_crypt_init(struct drm_device *dev, int engine)
-{
-	int i;
-
-	/* reset! */
-	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
-
-	/* wait for exit interrupt to signal */
-	nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
-	nv_wr32(dev, 0x087004, 0x00000010);
-
-	/* upload microcode code and data segments */
-	nv_wr32(dev, 0x087ff8, 0x00100000);
-	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
-		nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
-
-	nv_wr32(dev, 0x087ff8, 0x00000000);
-	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
-		nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
-
-	/* start it running */
-	nv_wr32(dev, 0x08710c, 0x00000000);
-	nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
-	nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
-	return 0;
-}
-
-static struct nouveau_enum nv98_crypt_isr_error_name[] = {
-	{ 0x0000, "ILLEGAL_MTHD" },
-	{ 0x0001, "INVALID_BITFIELD" },
-	{ 0x0002, "INVALID_ENUM" },
-	{ 0x0003, "QUERY" },
-	{}
-};
-
-static void
-nv98_crypt_isr(struct drm_device *dev)
-{
-	u32 disp = nv_rd32(dev, 0x08701c);
-	u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
-	u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
-	u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
-	u32 addr = nv_rd32(dev, 0x087040) >> 16;
-	u32 mthd = (addr & 0x07ff) << 2;
-	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(dev, 0x087044);
-	int chid = nv50_graph_isr_chid(dev, inst);
-
-	if (stat & 0x00000040) {
-		NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
-		nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
-		printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
-			chid, inst, subc, mthd, data);
-		nv_wr32(dev, 0x087004, 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
-		nv_wr32(dev, 0x087004, stat);
-	}
-
-	nv50_fb_vm_trap(dev, 1);
-}
-
-static void
-nv98_crypt_destroy(struct drm_device *dev, int engine)
-{
-	struct nv98_crypt_priv *priv = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 14);
-	NVOBJ_ENGINE_DEL(dev, CRYPT);
-	kfree(priv);
-}
-
-int
-nv98_crypt_create(struct drm_device *dev)
-{
-	struct nv98_crypt_priv *priv;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.destroy = nv98_crypt_destroy;
-	priv->base.init = nv98_crypt_init;
-	priv->base.fini = nv98_crypt_fini;
-	priv->base.context_new = nv98_crypt_context_new;
-	priv->base.context_del = nv98_crypt_context_del;
-	priv->base.object_new = nv98_crypt_object_new;
-	priv->base.tlb_flush = nv98_crypt_tlb_flush;
-
-	nouveau_irq_register(dev, 14, nv98_crypt_isr);
-
-	NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
-	NVOBJ_CLASS(dev, 0x88b4, CRYPT);
-	return 0;
-}
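
The falcon-based engines removed in this patch (nv98 PCRYPT here, the nva3/nvc0 PCOPY engines below) all decode DISPATCH_ERROR the same way: the low 16 bits of the status register (0x087040 / 0x104040 / fuc+0x040) hold the error code, the high 16 bits hold the faulting method address, and the handlers derive mthd = (addr & 0x07ff) << 2 and subc = (addr & 0x3800) >> 11 from it. Below is a minimal, self-contained sketch of just that bit layout; falcon_fault and falcon_decode are made-up names, not driver symbols.

/* Decode of the falcon dispatch-error word as done in the removed
 * nv98_crypt/nva3_copy/nvc0_copy ISRs.  Pure bit manipulation. */
#include <stdint.h>
#include <stdio.h>

struct falcon_fault {
	uint32_t ssta;  /* error code, low 16 bits */
	uint32_t mthd;  /* method offset in bytes */
	uint32_t subc;  /* subchannel 0..7 */
};

static struct falcon_fault falcon_decode(uint32_t reg)
{
	uint32_t addr = reg >> 16;
	struct falcon_fault f = {
		.ssta = reg & 0x0000ffff,
		.mthd = (addr & 0x07ff) << 2,   /* dword index -> byte offset */
		.subc = (addr & 0x3800) >> 11,
	};
	return f;
}

int main(void)
{
	/* hypothetical value: subc 2, method 0x0180, error code 1 */
	struct falcon_fault f = falcon_decode((2u << 27) | (0x60u << 16) | 1u);
	printf("ssta %u subc %u mthd 0x%04x\n", f.ssta, f.subc, f.mthd);
	return 0;
}
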
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
deleted file mode 100644
index 7801cbd057fa..000000000000
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-#include "nva3_copy.fuc.h"
-
-struct nva3_copy_engine {
-	struct nouveau_exec_engine base;
-};
-
-static int
-nva3_copy_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_gpuobj *ctx = NULL;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, &ctx);
-	if (ret)
-		return ret;
-
-	nv_wo32(ramin, 0xc0, 0x00190000);
-	nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
-	nv_wo32(ramin, 0xc8, ctx->vinst);
-	nv_wo32(ramin, 0xcc, 0x00000000);
-	nv_wo32(ramin, 0xd0, 0x00000000);
-	nv_wo32(ramin, 0xd4, 0x00000000);
-	dev_priv->engine.instmem.flush(dev);
-
-	atomic_inc(&chan->vm->engref[engine]);
-	chan->engctx[engine] = ctx;
-	return 0;
-}
-
-static int
-nva3_copy_object_new(struct nouveau_channel *chan, int engine,
-		     u32 handle, u16 class)
-{
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-
-	/* fuc engine doesn't need an object, our ramht code does.. */
-	ctx->engine = 3;
-	ctx->class  = class;
-	return nouveau_ramht_insert(chan, handle, ctx);
-}
-
-static void
-nva3_copy_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	int i;
-
-	for (i = 0xc0; i <= 0xd4; i += 4)
-		nv_wo32(chan->ramin, i, 0x00000000);
-
-	atomic_dec(&chan->vm->engref[engine]);
-	nouveau_gpuobj_ref(NULL, &ctx);
-	chan->engctx[engine] = ctx;
-}
-
-static void
-nva3_copy_tlb_flush(struct drm_device *dev, int engine)
-{
-	nv50_vm_flush_engine(dev, 0x0d);
-}
-
-static int
-nva3_copy_init(struct drm_device *dev, int engine)
-{
-	int i;
-
-	nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
-	nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
-
-	/* upload ucode */
-	nv_wr32(dev, 0x1041c0, 0x01000000);
-	for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
-		nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
-
-	nv_wr32(dev, 0x104180, 0x01000000);
-	for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nv_wr32(dev, 0x104188, i >> 6);
-		nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
-	}
-
-	/* start it running */
-	nv_wr32(dev, 0x10410c, 0x00000000);
-	nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
-	nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
-	return 0;
-}
-
-static int
-nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
-	nv_wr32(dev, 0x104014, 0xffffffff);
-	return 0;
-}
-
-static struct nouveau_enum nva3_copy_isr_error_name[] = {
-	{ 0x0001, "ILLEGAL_MTHD" },
-	{ 0x0002, "INVALID_ENUM" },
-	{ 0x0003, "INVALID_BITFIELD" },
-	{}
-};
-
-static void
-nva3_copy_isr(struct drm_device *dev)
-{
-	u32 dispatch = nv_rd32(dev, 0x10401c);
-	u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
-	u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
-	u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
-	u32 addr = nv_rd32(dev, 0x104040) >> 16;
-	u32 mthd = (addr & 0x07ff) << 2;
-	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(dev, 0x104044);
-	int chid = nv50_graph_isr_chid(dev, inst);
-
-	if (stat & 0x00000040) {
-		NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
-		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
-		printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
-			chid, inst, subc, mthd, data);
-		nv_wr32(dev, 0x104004, 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
-		nv_wr32(dev, 0x104004, stat);
-	}
-	nv50_fb_vm_trap(dev, 1);
-}
-
-static void
-nva3_copy_destroy(struct drm_device *dev, int engine)
-{
-	struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, 22);
-
-	NVOBJ_ENGINE_DEL(dev, COPY0);
-	kfree(pcopy);
-}
-
-int
-nva3_copy_create(struct drm_device *dev)
-{
-	struct nva3_copy_engine *pcopy;
-
-	pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
-	if (!pcopy)
-		return -ENOMEM;
-
-	pcopy->base.destroy = nva3_copy_destroy;
-	pcopy->base.init = nva3_copy_init;
-	pcopy->base.fini = nva3_copy_fini;
-	pcopy->base.context_new = nva3_copy_context_new;
-	pcopy->base.context_del = nva3_copy_context_del;
-	pcopy->base.object_new = nva3_copy_object_new;
-	pcopy->base.tlb_flush = nva3_copy_tlb_flush;
-
-	nouveau_irq_register(dev, 22, nva3_copy_isr);
-
-	NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
-	NVOBJ_CLASS(dev, 0x85b5, COPY0);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 9258524e4c80..863f010fafeb 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -23,17 +23,24 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
 
+#include <subdev/bios/pll.h>
+#include <subdev/bios.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
 static u32 read_clk(struct drm_device *, int, bool);
 static u32 read_pll(struct drm_device *, int, u32);
 
 static u32
 read_vco(struct drm_device *dev, int clk)
 {
-	u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
 	if ((sctl & 0x00000030) != 0x00000030)
 		return read_pll(dev, 0x41, 0x00e820);
 	return read_pll(dev, 0x42, 0x00e8a0);
@@ -42,26 +49,27 @@ read_vco(struct drm_device *dev, int clk)
 static u32
 read_clk(struct drm_device *dev, int clk, bool ignore_en)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 sctl, sdiv, sclk;
 
 	/* refclk for the 0xe8xx plls is a fixed frequency */
 	if (clk >= 0x40) {
-		if (dev_priv->chipset == 0xaf) {
+		if (nv_device(drm->device)->chipset == 0xaf) {
 			/* no joke.. seriously.. sigh.. */
-			return nv_rd32(dev, 0x00471c) * 1000;
+			return nv_rd32(device, 0x00471c) * 1000;
 		}
 
-		return dev_priv->crystal;
+		return device->crystal;
 	}
 
-	sctl = nv_rd32(dev, 0x4120 + (clk * 4));
+	sctl = nv_rd32(device, 0x4120 + (clk * 4));
 	if (!ignore_en && !(sctl & 0x00000100))
 		return 0;
 
 	switch (sctl & 0x00003000) {
 	case 0x00000000:
-		return dev_priv->crystal;
+		return device->crystal;
 	case 0x00002000:
 		if (sctl & 0x00000040)
 			return 108000;
@@ -78,12 +86,13 @@ read_clk(struct drm_device *dev, int clk, bool ignore_en)
 static u32
 read_pll(struct drm_device *dev, int clk, u32 pll)
 {
-	u32 ctrl = nv_rd32(dev, pll + 0);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, pll + 0);
 	u32 sclk = 0, P = 1, N = 1, M = 1;
 
 	if (!(ctrl & 0x00000008)) {
 		if (ctrl & 0x00000001) {
-			u32 coef = nv_rd32(dev, pll + 4);
+			u32 coef = nv_rd32(device, pll + 4);
 			M = (coef & 0x000000ff) >> 0;
 			N = (coef & 0x0000ff00) >> 8;
 			P = (coef & 0x003f0000) >> 16;
@@ -111,7 +120,10 @@ struct creg {
 static int
 calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
 {
-	struct pll_lims limits;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll limits;
 	u32 oclk, sclk, sdiv;
 	int P, N, M, diff;
 	int ret;
@@ -119,7 +131,7 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
 	reg->pll = 0;
 	reg->clk = 0;
 	if (!khz) {
-		NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk);
+		NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
 		return 0;
 	}
 
@@ -154,14 +166,14 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
 		}
 
 		if (!pll) {
-			NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk);
+			NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
 			return -ERANGE;
 		}
 
 		break;
 	}
 
-	ret = get_pll_limits(dev, pll, &limits);
+	ret = nvbios_pll_parse(bios, pll, &limits);
 	if (ret)
 		return ret;
 
@@ -171,54 +183,60 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
 
 	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
 	if (ret >= 0) {
-		reg->clk = nv_rd32(dev, 0x4120 + (clk * 4));
+		reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
 		reg->pll = (P << 16) | (N << 8) | M;
 	}
+
 	return ret;
 }
 
 static void
 prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	const u32 src0 = 0x004120 + (clk * 4);
 	const u32 src1 = 0x004160 + (clk * 4);
 	const u32 ctrl = pll + 0;
 	const u32 coef = pll + 4;
 
 	if (!reg->clk && !reg->pll) {
-		NV_DEBUG(dev, "no clock for %02x\n", clk);
+		NV_DEBUG(drm, "no clock for %02x\n", clk);
 		return;
 	}
 
 	if (reg->pll) {
-		nv_mask(dev, src0, 0x00000101, 0x00000101);
-		nv_wr32(dev, coef, reg->pll);
-		nv_mask(dev, ctrl, 0x00000015, 0x00000015);
-		nv_mask(dev, ctrl, 0x00000010, 0x00000000);
-		nv_wait(dev, ctrl, 0x00020000, 0x00020000);
-		nv_mask(dev, ctrl, 0x00000010, 0x00000010);
-		nv_mask(dev, ctrl, 0x00000008, 0x00000000);
-		nv_mask(dev, src1, 0x00000100, 0x00000000);
-		nv_mask(dev, src1, 0x00000001, 0x00000000);
+		nv_mask(device, src0, 0x00000101, 0x00000101);
+		nv_wr32(device, coef, reg->pll);
+		nv_mask(device, ctrl, 0x00000015, 0x00000015);
+		nv_mask(device, ctrl, 0x00000010, 0x00000000);
+		nv_wait(device, ctrl, 0x00020000, 0x00020000);
+		nv_mask(device, ctrl, 0x00000010, 0x00000010);
+		nv_mask(device, ctrl, 0x00000008, 0x00000000);
+		nv_mask(device, src1, 0x00000100, 0x00000000);
+		nv_mask(device, src1, 0x00000001, 0x00000000);
 	} else {
-		nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
-		nv_mask(dev, ctrl, 0x00000018, 0x00000018);
+		nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
+		nv_mask(device, ctrl, 0x00000018, 0x00000018);
 		udelay(20);
-		nv_mask(dev, ctrl, 0x00000001, 0x00000000);
-		nv_mask(dev, src0, 0x00000100, 0x00000000);
-		nv_mask(dev, src0, 0x00000001, 0x00000000);
+		nv_mask(device, ctrl, 0x00000001, 0x00000000);
+		nv_mask(device, src0, 0x00000100, 0x00000000);
+		nv_mask(device, src0, 0x00000001, 0x00000000);
 	}
 }
 
 static void
 prog_clk(struct drm_device *dev, int clk, struct creg *reg)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	if (!reg->clk) {
-		NV_DEBUG(dev, "no clock for %02x\n", clk);
+		NV_DEBUG(drm, "no clock for %02x\n", clk);
 		return;
 	}
 
-	nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
+	nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
 }
 
 int
@@ -309,10 +327,11 @@ static bool
 nva3_pm_grcp_idle(void *data)
 {
 	struct drm_device *dev = data;
+	struct nouveau_device *device = nouveau_dev(dev);
 
-	if (!(nv_rd32(dev, 0x400304) & 0x00000001))
+	if (!(nv_rd32(device, 0x400304) & 0x00000001))
 		return true;
-	if (nv_rd32(dev, 0x400308) == 0x0050001c)
+	if (nv_rd32(device, 0x400308) == 0x0050001c)
 		return true;
 	return false;
 }
@@ -320,85 +339,91 @@ nva3_pm_grcp_idle(void *data)
 static void
 mclk_precharge(struct nouveau_mem_exec_func *exec)
 {
-	nv_wr32(exec->dev, 0x1002d4, 0x00000001);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002d4, 0x00000001);
 }
 
 static void
 mclk_refresh(struct nouveau_mem_exec_func *exec)
 {
-	nv_wr32(exec->dev, 0x1002d0, 0x00000001);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002d0, 0x00000001);
 }
 
 static void
 mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
 {
-	nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
 }
 
 static void
 mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
 {
-	nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
 }
 
 static void
 mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
 {
-	volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	volatile u32 post = nv_rd32(device, 0); (void)post;
 	udelay((nsec + 500) / 1000);
 }
 
 static u32
 mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	if (mr <= 1)
-		return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
+		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
 	if (mr <= 3)
-		return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
+		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
 	return 0;
 }
 
 static void
 mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
 {
-	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
-
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	if (mr <= 1) {
-		if (dev_priv->vram_rank_B)
-			nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data);
-		nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data);
+		if (pfb->ram.ranks > 1)
+			nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
+		nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
 	} else
 	if (mr <= 3) {
-		if (dev_priv->vram_rank_B)
-			nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data);
-		nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data);
+		if (pfb->ram.ranks > 1)
+			nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
+		nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
 	}
 }
 
 static void
 mclk_clock_set(struct nouveau_mem_exec_func *exec)
 {
-	struct drm_device *dev = exec->dev;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nva3_pm_state *info = exec->priv;
 	u32 ctrl;
 
-	ctrl = nv_rd32(dev, 0x004000);
+	ctrl = nv_rd32(device, 0x004000);
 	if (!(ctrl & 0x00000008) && info->mclk.pll) {
-		nv_wr32(dev, 0x004000, (ctrl |=  0x00000008));
-		nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
-		nv_wr32(dev, 0x004018, 0x00001000);
-		nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
-		nv_wr32(dev, 0x004004, info->mclk.pll);
-		nv_wr32(dev, 0x004000, (ctrl |=  0x00000001));
+		nv_wr32(device, 0x004000, (ctrl |=  0x00000008));
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
+		nv_wr32(device, 0x004018, 0x00001000);
+		nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
+		nv_wr32(device, 0x004004, info->mclk.pll);
+		nv_wr32(device, 0x004000, (ctrl |=  0x00000001));
 		udelay(64);
-		nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
+		nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
 		udelay(20);
 	} else
 	if (!info->mclk.pll) {
-		nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
-		nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
-		nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
-		nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
+		nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
+		nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
+		nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
 	}
 
 	if (info->rammap) {
@@ -410,67 +435,68 @@ mclk_clock_set(struct nouveau_mem_exec_func *exec)
 				     (info->ramcfg[3] & 0x0f) << 16 |
 				     (info->ramcfg[9] & 0x0f) |
 				     0x80000000;
-			nv_wr32(dev, 0x1005a0, unk5a0);
-			nv_wr32(dev, 0x1005a4, unk5a4);
-			nv_wr32(dev, 0x10f804, unk804);
-			nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
+			nv_wr32(device, 0x1005a0, unk5a0);
+			nv_wr32(device, 0x1005a4, unk5a4);
+			nv_wr32(device, 0x10f804, unk804);
+			nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
 		} else {
-			nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
-			nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
-			nv_mask(dev, 0x100760, 0x22222222, info->r100760);
-			nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
-			nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
+			nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
+			nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
+			nv_mask(device, 0x100760, 0x22222222, info->r100760);
+			nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
+			nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
 		}
 	}
 
 	if (info->mclk.pll) {
-		nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
-		nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
+		nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
 	}
 }
 
 static void
 mclk_timing_set(struct nouveau_mem_exec_func *exec)
 {
-	struct drm_device *dev = exec->dev;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nva3_pm_state *info = exec->priv;
 	struct nouveau_pm_level *perflvl = info->perflvl;
 	int i;
 
 	for (i = 0; i < 9; i++)
-		nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);
+		nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);
 
 	if (info->ramcfg) {
 		u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
-		nv_mask(dev, 0x100200, 0x00001000, data);
+		nv_mask(device, 0x100200, 0x00001000, data);
 	}
 
 	if (info->ramcfg) {
-		u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
-		u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
-		u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
+		u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
+		u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
+		u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
 		if ( (info->ramcfg[2] & 0x20))
 			unk714 |= 0xf0000000;
 		if (!(info->ramcfg[2] & 0x04))
 			unk714 |= 0x00000010;
-		nv_wr32(dev, 0x100714, unk714);
+		nv_wr32(device, 0x100714, unk714);
 
 		if (info->ramcfg[2] & 0x01)
 			unk71c |= 0x00000100;
-		nv_wr32(dev, 0x10071c, unk71c);
+		nv_wr32(device, 0x10071c, unk71c);
 
 		if (info->ramcfg[2] & 0x02)
 			unk718 |= 0x00000100;
-		nv_wr32(dev, 0x100718, unk718);
+		nv_wr32(device, 0x100718, unk718);
 
 		if (info->ramcfg[2] & 0x10)
-			nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
+			nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
 	}
 }
 
 static void
 prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nouveau_mem_exec_func exec = {
 		.dev = dev,
 		.precharge = mclk_precharge,
@@ -492,17 +518,17 @@ prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
 		info->r100760 = 0x22222222;
 	}
 
-	ctrl = nv_rd32(dev, 0x004000);
+	ctrl = nv_rd32(device, 0x004000);
 	if (ctrl & 0x00000008) {
 		if (info->mclk.pll) {
-			nv_mask(dev, 0x004128, 0x00000101, 0x00000101);
-			nv_wr32(dev, 0x004004, info->mclk.pll);
-			nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
-			nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef));
-			nv_wait(dev, 0x004000, 0x00020000, 0x00020000);
-			nv_wr32(dev, 0x004000, (ctrl |= 0x00000010));
-			nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
-			nv_wr32(dev, 0x004000, (ctrl |= 0x00000004));
+			nv_mask(device, 0x004128, 0x00000101, 0x00000101);
+			nv_wr32(device, 0x004004, info->mclk.pll);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
+			nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
+			nv_wait(device, 0x004000, 0x00020000, 0x00020000);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
+			nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
 		}
 	} else {
 		u32 ssel = 0x00000101;
@@ -510,68 +536,67 @@ prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
 			ssel |= info->mclk.clk;
 		else
 			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
-		nv_mask(dev, 0x004168, 0x003f3141, ctrl);
+		nv_mask(device, 0x004168, 0x003f3141, ctrl);
 	}
 
 	if (info->ramcfg) {
 		if (info->ramcfg[2] & 0x10) {
-			nv_mask(dev, 0x111104, 0x00000600, 0x00000000);
+			nv_mask(device, 0x111104, 0x00000600, 0x00000000);
 		} else {
-			nv_mask(dev, 0x111100, 0x40000000, 0x40000000);
-			nv_mask(dev, 0x111104, 0x00000180, 0x00000000);
+			nv_mask(device, 0x111100, 0x40000000, 0x40000000);
+			nv_mask(device, 0x111104, 0x00000180, 0x00000000);
 		}
 	}
 	if (info->rammap && !(info->rammap[4] & 0x02))
-		nv_mask(dev, 0x100200, 0x00000800, 0x00000000);
-	nv_wr32(dev, 0x611200, 0x00003300);
+		nv_mask(device, 0x100200, 0x00000800, 0x00000000);
+	nv_wr32(device, 0x611200, 0x00003300);
 	if (!(info->ramcfg[2] & 0x10))
-		nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/
+		nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
 
 	nouveau_mem_exec(&exec, info->perflvl);
 
-	nv_wr32(dev, 0x611200, 0x00003330);
+	nv_wr32(device, 0x611200, 0x00003330);
 	if (info->rammap && (info->rammap[4] & 0x02))
-		nv_mask(dev, 0x100200, 0x00000800, 0x00000800);
+		nv_mask(device, 0x100200, 0x00000800, 0x00000800);
 	if (info->ramcfg) {
 		if (info->ramcfg[2] & 0x10) {
-			nv_mask(dev, 0x111104, 0x00000180, 0x00000180);
-			nv_mask(dev, 0x111100, 0x40000000, 0x00000000);
+			nv_mask(device, 0x111104, 0x00000180, 0x00000180);
+			nv_mask(device, 0x111100, 0x40000000, 0x00000000);
 		} else {
-			nv_mask(dev, 0x111104, 0x00000600, 0x00000600);
+			nv_mask(device, 0x111104, 0x00000600, 0x00000600);
 		}
 	}
 
 	if (info->mclk.pll) {
-		nv_mask(dev, 0x004168, 0x00000001, 0x00000000);
-		nv_mask(dev, 0x004168, 0x00000100, 0x00000000);
+		nv_mask(device, 0x004168, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004168, 0x00000100, 0x00000000);
 	} else {
-		nv_mask(dev, 0x004000, 0x00000001, 0x00000000);
-		nv_mask(dev, 0x004128, 0x00000001, 0x00000000);
-		nv_mask(dev, 0x004128, 0x00000100, 0x00000000);
+		nv_mask(device, 0x004000, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004128, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004128, 0x00000100, 0x00000000);
 	}
 }
 
 int
 nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nva3_pm_state *info = pre_state;
-	unsigned long flags;
 	int ret = -EAGAIN;
 
 	/* prevent any new grctx switches from starting */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wr32(dev, 0x400324, 0x00000000);
-	nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */
+	nv_wr32(device, 0x400324, 0x00000000);
+	nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
 	/* wait for any pending grctx switches to complete */
-	if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) {
-		NV_ERROR(dev, "pm: ctxprog didn't go idle\n");
+	if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
+		NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
 		goto cleanup;
 	}
 	/* freeze PFIFO */
-	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
-	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) {
-		NV_ERROR(dev, "pm: fifo didn't go idle\n");
+	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
+	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
+		NV_ERROR(drm, "pm: fifo didn't go idle\n");
 		goto cleanup;
 	}
 
@@ -587,14 +612,13 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
 
 cleanup:
 	/* unfreeze PFIFO */
-	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
 	/* restore ctxprog to normal */
-	nv_wr32(dev, 0x400324, 0x00000000);
-	nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */
+	nv_wr32(device, 0x400324, 0x00000000);
+	nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
 	/* unblock it if necessary */
-	if (nv_rd32(dev, 0x400308) == 0x0050001c)
-		nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	if (nv_rd32(device, 0x400308) == 0x0050001c)
+		nv_mask(device, 0x400824, 0x10000000, 0x10000000);
 	kfree(info);
 	return ret;
 }
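
The nva3_pm.c changes above are mostly mechanical (register access now goes through a nouveau_device handle, messages through nouveau_drm), but the PLL readback they touch is simple to model: read_pll() unpacks the coefficient register as M in bits 0-7, N in bits 8-15 and P in bits 16-21, and (in the tail of the function, outside this hunk, so treat it as an assumption) returns the reference clock scaled by N / (M * P). A small sketch of that arithmetic follows; pll_khz is an invented helper name.

/* Sketch of the PLL coefficient decode used by read_pll() above.
 * The ref * N / (M * P) step is assumed from surrounding driver code
 * and is not visible in this hunk. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pll_khz(uint32_t ref_khz, uint32_t coef)
{
	uint32_t M = (coef & 0x000000ff) >> 0;
	uint32_t N = (coef & 0x0000ff00) >> 8;
	uint32_t P = (coef & 0x003f0000) >> 16;

	if (M == 0 || P == 0)
		return 0;
	return (uint32_t)((uint64_t)ref_khz * N / (M * P));
}

int main(void)
{
	/* e.g. a 27000 kHz crystal with N=100, M=2, P=2 -> 675000 kHz */
	printf("%u kHz\n", pll_khz(27000, (2u << 16) | (100u << 8) | 2u));
	return 0;
}
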
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
deleted file mode 100644
index 88a922d60822..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_copy.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-#include "nvc0_copy.fuc.h"
-
-struct nvc0_copy_engine {
-	struct nouveau_exec_engine base;
-	u32 irq;
-	u32 pmc;
-	u32 fuc;
-	u32 ctx;
-};
-
-static int
-nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_gpuobj *ctx = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 256, 256,
-				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
-				 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
-	if (ret)
-		return ret;
-
-	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
-	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
-	dev_priv->engine.instmem.flush(dev);
-
-	chan->engctx[engine] = ctx;
-	return 0;
-}
-
-static int
-nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
-		     u32 handle, u16 class)
-{
-	return 0;
-}
-
-static void
-nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	u32 inst;
-
-	inst  = (chan->ramin->vinst >> 12);
-	inst |= 0x40000000;
-
-	/* disable fifo access */
-	nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
-	/* mark channel as unloaded if it's currently active */
-	if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
-		nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
-	/* mark next channel as invalid if it's about to be loaded */
-	if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
-		nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
-	/* restore fifo access */
-	nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
-
-	nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
-	nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
-	nouveau_gpuobj_ref(NULL, &ctx);
-
-	chan->engctx[engine] = ctx;
-}
-
-static int
-nvc0_copy_init(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-	int i;
-
-	nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
-	nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
-	nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
-
-	nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
-	for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
-		nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
-
-	nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
-	for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
-		nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
-	}
-
-	nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
-	nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
-	nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
-	nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
-	return 0;
-}
-
-static int
-nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-
-	nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
-
-	/* trigger fuc context unload */
-	nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
-	nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
-	nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
-	nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
-
-	nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
-	return 0;
-}
-
-static struct nouveau_enum nvc0_copy_isr_error_name[] = {
-	{ 0x0001, "ILLEGAL_MTHD" },
-	{ 0x0002, "INVALID_ENUM" },
-	{ 0x0003, "INVALID_BITFIELD" },
-	{}
-};
-
-static void
-nvc0_copy_isr(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-	u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
-	u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
-	u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
-	u32 chid = nvc0_graph_isr_chid(dev, inst);
-	u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
-	u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
-	u32 mthd = (addr & 0x07ff) << 2;
-	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
-
-	if (stat & 0x00000040) {
-		NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
-		nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
-		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
-			chid, inst, subc, mthd, data);
-		nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
-		nv_wr32(dev, pcopy->fuc + 0x004, stat);
-	}
-}
-
-static void
-nvc0_copy_isr_0(struct drm_device *dev)
-{
-	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
-}
-
-static void
-nvc0_copy_isr_1(struct drm_device *dev)
-{
-	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
-}
-
-static void
-nvc0_copy_destroy(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, pcopy->irq);
-
-	if (engine == NVOBJ_ENGINE_COPY0)
-		NVOBJ_ENGINE_DEL(dev, COPY0);
-	else
-		NVOBJ_ENGINE_DEL(dev, COPY1);
-	kfree(pcopy);
-}
-
-int
-nvc0_copy_create(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy;
-
-	pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
-	if (!pcopy)
-		return -ENOMEM;
-
-	pcopy->base.destroy = nvc0_copy_destroy;
-	pcopy->base.init = nvc0_copy_init;
-	pcopy->base.fini = nvc0_copy_fini;
-	pcopy->base.context_new = nvc0_copy_context_new;
-	pcopy->base.context_del = nvc0_copy_context_del;
-	pcopy->base.object_new = nvc0_copy_object_new;
-
-	if (engine == 0) {
-		pcopy->irq = 5;
-		pcopy->pmc = 0x00000040;
-		pcopy->fuc = 0x104000;
-		pcopy->ctx = 0x0230;
-		nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
-		NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
-		NVOBJ_CLASS(dev, 0x90b5, COPY0);
-	} else {
-		pcopy->irq = 6;
-		pcopy->pmc = 0x00000080;
-		pcopy->fuc = 0x105000;
-		pcopy->ctx = 0x0240;
-		nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
-		NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
-		NVOBJ_CLASS(dev, 0x90b8, COPY1);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
deleted file mode 100644
index 7da32a9ef08e..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-struct nvc0_fb_priv {
-	struct page *r100c10_page;
-	dma_addr_t r100c10;
-};
-
-static inline void
-nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
-{
-	u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
-	u32 stat = nv_rd32(dev, subp_base + 0x020);
-
-	if (stat) {
-		NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
-		nv_wr32(dev, subp_base + 0x020, stat);
-	}
-}
-
-static void
-nvc0_mfb_isr(struct drm_device *dev)
-{
-	u32 units = nv_rd32(dev, 0x00017c);
-	while (units) {
-		u32 subp, unit = ffs(units) - 1;
-		for (subp = 0; subp < 2; subp++)
-			nvc0_mfb_subp_isr(dev, unit, subp);
-		units &= ~(1 << unit);
-	}
-
-	/* we do something horribly wrong and upset PMFB a lot, so mask off
-	 * interrupts from it after the first one until it's fixed
-	 */
-	nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
-}
-
-static void
-nvc0_fb_destroy(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nvc0_fb_priv *priv = pfb->priv;
-
-	nouveau_irq_unregister(dev, 25);
-
-	if (priv->r100c10_page) {
-		pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c10_page);
-	}
-
-	kfree(priv);
-	pfb->priv = NULL;
-}
-
-static int
-nvc0_fb_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nvc0_fb_priv *priv;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	pfb->priv = priv;
-
-	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!priv->r100c10_page) {
-		nvc0_fb_destroy(dev);
-		return -ENOMEM;
-	}
-
-	priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
-				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
-		nvc0_fb_destroy(dev);
-		return -EFAULT;
-	}
-
-	nouveau_irq_register(dev, 25, nvc0_mfb_isr);
-	return 0;
-}
-
-int
-nvc0_fb_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fb_priv *priv;
-	int ret;
-
-	if (!dev_priv->engine.fb.priv) {
-		ret = nvc0_fb_create(dev);
-		if (ret)
-			return ret;
-	}
-	priv = dev_priv->engine.fb.priv;
-
-	nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
-	nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
-	return 0;
-}
-
-void
-nvc0_fb_takedown(struct drm_device *dev)
-{
-	nvc0_fb_destroy(dev);
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index ade005fa9de1..9dcd30f3e1e0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -22,20 +22,16 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
-#include "nouveau_mm.h"
 
 int
 nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
 nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
 nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
@@ -157,12 +151,14 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
 	struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_object *object;
 	int ret, format;
 
-	ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+				 0x902d, NULL, 0, &object);
 	if (ret)
 		return ret;
 
@@ -202,9 +198,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 
 	BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
 	OUT_RING  (chan, 0x0000902d);
-	BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
-	OUT_RING  (chan, upper_32_bits(chan->notifier_vma.offset));
-	OUT_RING  (chan, lower_32_bits(chan->notifier_vma.offset));
 	BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
 	OUT_RING  (chan, 0);
 	BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 2e666d0c4048..53299eac9676 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,29 +22,44 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include <core/object.h>
+#include <core/client.h>
+#include <core/class.h>
+
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fence.h"
 
+#include "nv50_display.h"
+
 struct nvc0_fence_priv {
 	struct nouveau_fence_priv base;
 	struct nouveau_bo *bo;
+	u32 *suspend;
 };
 
 struct nvc0_fence_chan {
 	struct nouveau_fence_chan base;
 	struct nouveau_vma vma;
+	struct nouveau_vma dispc_vma[4];
 };
 
+u64
+nvc0_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+	struct nvc0_fence_chan *fctx = chan->fence;
+	return fctx->dispc_vma[crtc].offset;
+}
+
 static int
 nvc0_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
-	struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
-	u64 addr = fctx->vma.offset + chan->id * 16;
+	struct nvc0_fence_chan *fctx = chan->fence;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
 	int ret;
 
 	ret = RING_SPACE(chan, 5);
@@ -64,8 +79,9 @@ static int
 nvc0_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
-	struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
-	u64 addr = fctx->vma.offset + prev->id * 16;
+	struct nvc0_fence_chan *fctx = chan->fence;
+	struct nouveau_fifo_chan *fifo = (void *)prev->object;
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
 	int ret;
 
 	ret = RING_SPACE(chan, 5);
@@ -85,91 +101,135 @@ nvc0_fence_sync(struct nouveau_fence *fence,
 static u32
 nvc0_fence_read(struct nouveau_channel *chan)
 {
-	struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
-	return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nvc0_fence_priv *priv = chan->drm->fence;
+	return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
 }
 
 static void
-nvc0_fence_context_del(struct nouveau_channel *chan, int engine)
+nvc0_fence_context_del(struct nouveau_channel *chan)
 {
-	struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
-	struct nvc0_fence_chan *fctx = chan->engctx[engine];
+	struct drm_device *dev = chan->drm->dev;
+	struct nvc0_fence_priv *priv = chan->drm->fence;
+	struct nvc0_fence_chan *fctx = chan->fence;
+	int i;
+
+	if (nv_device(chan->drm->device)->card_type >= NV_D0) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++) {
+			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+		}
+	} else
+	if (nv_device(chan->drm->device)->card_type >= NV_50) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++) {
+			struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+		}
+	}
 
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
 	nouveau_fence_context_del(&fctx->base);
-	chan->engctx[engine] = NULL;
+	chan->fence = NULL;
 	kfree(fctx);
 }
 
 static int
-nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
+nvc0_fence_context_new(struct nouveau_channel *chan)
 {
-	struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nouveau_client *client = nouveau_client(fifo);
+	struct nvc0_fence_priv *priv = chan->drm->fence;
 	struct nvc0_fence_chan *fctx;
-	int ret;
+	int ret, i;
 
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (!fctx)
 		return -ENOMEM;
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
+	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
 	if (ret)
-		nvc0_fence_context_del(chan, engine);
+		nvc0_fence_context_del(chan);
+
+	/* map display semaphore buffers into channel's vm */
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo;
+		if (nv_device(chan->drm->device)->card_type >= NV_D0)
+			bo = nvd0_display_crtc_sema(chan->drm->dev, i);
+		else
+			bo = nv50_display_crtc_sema(chan->drm->dev, i);
 
-	nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
+		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+	}
+
+	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
 	return ret;
 }
 
-static int
-nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
+static bool
+nvc0_fence_suspend(struct nouveau_drm *drm)
 {
-	return 0;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nvc0_fence_priv *priv = drm->fence;
+	int i;
+
+	priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
+	}
+
+	return priv->suspend != NULL;
 }
 
-static int
-nvc0_fence_init(struct drm_device *dev, int engine)
+static void
+nvc0_fence_resume(struct nouveau_drm *drm)
 {
-	return 0;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nvc0_fence_priv *priv = drm->fence;
+	int i;
+
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
+		vfree(priv->suspend);
+		priv->suspend = NULL;
+	}
 }
 
 static void
-nvc0_fence_destroy(struct drm_device *dev, int engine)
+nvc0_fence_destroy(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fence_priv *priv = nv_engine(dev, engine);
-
+	struct nvc0_fence_priv *priv = drm->fence;
 	nouveau_bo_unmap(priv->bo);
 	nouveau_bo_ref(NULL, &priv->bo);
-	dev_priv->eng[engine] = NULL;
+	drm->fence = NULL;
 	kfree(priv);
 }
 
 int
-nvc0_fence_create(struct drm_device *dev)
+nvc0_fence_create(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
 	struct nvc0_fence_priv *priv;
 	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	priv->base.engine.destroy = nvc0_fence_destroy;
-	priv->base.engine.init = nvc0_fence_init;
-	priv->base.engine.fini = nvc0_fence_fini;
-	priv->base.engine.context_new = nvc0_fence_context_new;
-	priv->base.engine.context_del = nvc0_fence_context_del;
+	priv->base.dtor = nvc0_fence_destroy;
+	priv->base.suspend = nvc0_fence_suspend;
+	priv->base.resume = nvc0_fence_resume;
+	priv->base.context_new = nvc0_fence_context_new;
+	priv->base.context_del = nvc0_fence_context_del;
 	priv->base.emit = nvc0_fence_emit;
 	priv->base.sync = nvc0_fence_sync;
 	priv->base.read = nvc0_fence_read;
-	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
 
-	ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
-			     0, 0, NULL, &priv->bo);
+	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (ret == 0)
@@ -179,6 +239,6 @@ nvc0_fence_create(struct drm_device *dev)
 	}
 
 	if (ret)
-		nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+		nvc0_fence_destroy(drm);
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
deleted file mode 100644
index d03ba8631a69..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_fifo.h"
-
-static void nvc0_fifo_isr(struct drm_device *);
-
-struct nvc0_fifo_priv {
-	struct nouveau_fifo_priv base;
-	struct nouveau_gpuobj *playlist[2];
-	int cur_playlist;
-	struct nouveau_vma user_vma;
-	int spoon_nr;
-};
-
-struct nvc0_fifo_chan {
-	struct nouveau_fifo_chan base;
-	struct nouveau_gpuobj *user;
-};
-
-static void
-nvc0_fifo_playlist_update(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct nouveau_gpuobj *cur;
-	int i, p;
-
-	cur = priv->playlist[priv->cur_playlist];
-	priv->cur_playlist = !priv->cur_playlist;
-
-	for (i = 0, p = 0; i < 128; i++) {
-		if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
-			continue;
-		nv_wo32(cur, p + 0, i);
-		nv_wo32(cur, p + 4, 0x00000004);
-		p += 8;
-	}
-	pinstmem->flush(dev);
-
-	nv_wr32(dev, 0x002270, cur->vinst >> 12);
-	nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
-	if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
-		NV_ERROR(dev, "PFIFO - playlist update failed\n");
-}
-
-static int
-nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
-	struct nvc0_fifo_chan *fctx;
-	u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
-	int ret, i;
-
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-
-	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
-				priv->user_vma.offset + (chan->id * 0x1000),
-				PAGE_SIZE);
-	if (!chan->user) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	/* allocate vram for control regs, map into polling area */
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
-	if (ret)
-		goto error;
-
-	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
-			  *(struct nouveau_mem **)fctx->user->node);
-
-	for (i = 0; i < 0x100; i += 4)
-		nv_wo32(chan->ramin, i, 0x00000000);
-	nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
-	nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
-	nv_wo32(chan->ramin, 0x10, 0x0000face);
-	nv_wo32(chan->ramin, 0x30, 0xfffff902);
-	nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
-	nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
-				   upper_32_bits(ib_virt));
-	nv_wo32(chan->ramin, 0x54, 0x00000002);
-	nv_wo32(chan->ramin, 0x84, 0x20400000);
-	nv_wo32(chan->ramin, 0x94, 0x30000001);
-	nv_wo32(chan->ramin, 0x9c, 0x00000100);
-	nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
-	nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
-	nv_wo32(chan->ramin, 0xac, 0x0000001f);
-	nv_wo32(chan->ramin, 0xb8, 0xf8000000);
-	nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
-	pinstmem->flush(dev);
-
-	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
-						(chan->ramin->vinst >> 12));
-	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
-	nvc0_fifo_playlist_update(dev);
-
-error:
-	if (ret)
-		priv->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nvc0_fifo_chan *fctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-
-	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x002634, chan->id);
-	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
-		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-	nvc0_fifo_playlist_update(dev);
-	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
-
-	nouveau_gpuobj_ref(NULL, &fctx->user);
-	if (chan->user) {
-		iounmap(chan->user);
-		chan->user = NULL;
-	}
-
-	chan->engctx[engine] = NULL;
-	kfree(fctx);
-}
-
-static int
-nvc0_fifo_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
-	struct nouveau_channel *chan;
-	int i;
-
-	/* reset PFIFO, enable all available PSUBFIFO areas */
-	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(dev, 0x000204, 0xffffffff);
-	nv_wr32(dev, 0x002204, 0xffffffff);
-
-	priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
-	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
-
-	/* assign engines to subfifos */
-	if (priv->spoon_nr >= 3) {
-		nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
-		nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
-		nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
-		nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
-		nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
-		nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
-	}
-
-	/* PSUBFIFO[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
-	}
-
-	nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
-	nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
-
-	nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
-	nv_wr32(dev, 0x002100, 0xffffffff);
-	nv_wr32(dev, 0x002140, 0xbfffffff);
-
-	/* restore PFIFO context table */
-	for (i = 0; i < 128; i++) {
-		chan = dev_priv->channels.ptr[i];
-		if (!chan || !chan->engctx[engine])
-			continue;
-
-		nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
-						 (chan->ramin->vinst >> 12));
-		nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
-	}
-	nvc0_fifo_playlist_update(dev);
-
-	return 0;
-}
-
-static int
-nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	int i;
-
-	for (i = 0; i < 128; i++) {
-		if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
-			continue;
-
-		nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
-		nv_wr32(dev, 0x002634, i);
-		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
-			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
-				i, nv_rd32(dev, 0x002634));
-			return -EBUSY;
-		}
-	}
-
-	nv_wr32(dev, 0x002140, 0x00000000);
-	return 0;
-}
-
-
-struct nouveau_enum nvc0_fifo_fault_unit[] = {
-	{ 0x00, "PGRAPH" },
-	{ 0x03, "PEEPHOLE" },
-	{ 0x04, "BAR1" },
-	{ 0x05, "BAR3" },
-	{ 0x07, "PFIFO" },
-	{ 0x10, "PBSP" },
-	{ 0x11, "PPPP" },
-	{ 0x13, "PCOUNTER" },
-	{ 0x14, "PVP" },
-	{ 0x15, "PCOPY0" },
-	{ 0x16, "PCOPY1" },
-	{ 0x17, "PDAEMON" },
-	{}
-};
-
-struct nouveau_enum nvc0_fifo_fault_reason[] = {
-	{ 0x00, "PT_NOT_PRESENT" },
-	{ 0x01, "PT_TOO_SHORT" },
-	{ 0x02, "PAGE_NOT_PRESENT" },
-	{ 0x03, "VM_LIMIT_EXCEEDED" },
-	{ 0x04, "NO_CHANNEL" },
-	{ 0x05, "PAGE_SYSTEM_ONLY" },
-	{ 0x06, "PAGE_READ_ONLY" },
-	{ 0x0a, "COMPRESSED_SYSRAM" },
-	{ 0x0c, "INVALID_STORAGE_TYPE" },
-	{}
-};
-
-struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
-	{ 0x01, "PCOPY0" },
-	{ 0x02, "PCOPY1" },
-	{ 0x04, "DISPATCH" },
-	{ 0x05, "CTXCTL" },
-	{ 0x06, "PFIFO" },
-	{ 0x07, "BAR_READ" },
-	{ 0x08, "BAR_WRITE" },
-	{ 0x0b, "PVP" },
-	{ 0x0c, "PPPP" },
-	{ 0x0d, "PBSP" },
-	{ 0x11, "PCOUNTER" },
-	{ 0x12, "PDAEMON" },
-	{ 0x14, "CCACHE" },
-	{ 0x15, "CCACHE_POST" },
-	{}
-};
-
-struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
-	{ 0x01, "TEX" },
-	{ 0x0c, "ESETUP" },
-	{ 0x0e, "CTXCTL" },
-	{ 0x0f, "PROP" },
-	{}
-};
-
-struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
-/*	{ 0x00008000, "" }	seen with null ib push */
-	{ 0x00200000, "ILLEGAL_MTHD" },
-	{ 0x00800000, "EMPTY_SUBC" },
-	{}
-};
-
-static void
-nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
-{
-	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
-	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
-	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
-	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
-	u32 client = (stat & 0x00001f00) >> 8;
-
-	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
-		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
-	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
-	printk("] from ");
-	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
-	if (stat & 0x00000040) {
-		printk("/");
-		nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
-	} else {
-		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
-		nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
-	}
-	printk(" on channel 0x%010llx\n", (u64)inst << 12);
-}
-
-static int
-nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
-{
-	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (likely(chid >= 0 && chid < priv->base.channels)) {
-		chan = dev_priv->channels.ptr[chid];
-		if (likely(chan))
-			ret = nouveau_finish_page_flip(chan, NULL);
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return ret;
-}
-
-static void
-nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
-{
-	u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
-	u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
-	u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
-	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
-	u32 subc = (addr & 0x00070000);
-	u32 mthd = (addr & 0x00003ffc);
-	u32 show = stat;
-
-	if (stat & 0x00200000) {
-		if (mthd == 0x0054) {
-			if (!nvc0_fifo_page_flip(dev, chid))
-				show &= ~0x00200000;
-		}
-	}
-
-	if (show) {
-		NV_INFO(dev, "PFIFO%d:", unit);
-		nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
-		NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
-			     unit, chid, subc, mthd, data);
-	}
-
-	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
-	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
-}
-
-static void
-nvc0_fifo_isr(struct drm_device *dev)
-{
-	u32 mask = nv_rd32(dev, 0x002140);
-	u32 stat = nv_rd32(dev, 0x002100) & mask;
-
-	if (stat & 0x00000100) {
-		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
-		nv_wr32(dev, 0x002100, 0x00000100);
-		stat &= ~0x00000100;
-	}
-
-	if (stat & 0x10000000) {
-		u32 units = nv_rd32(dev, 0x00259c);
-		u32 u = units;
-
-		while (u) {
-			int i = ffs(u) - 1;
-			nvc0_fifo_isr_vm_fault(dev, i);
-			u &= ~(1 << i);
-		}
-
-		nv_wr32(dev, 0x00259c, units);
-		stat &= ~0x10000000;
-	}
-
-	if (stat & 0x20000000) {
-		u32 units = nv_rd32(dev, 0x0025a0);
-		u32 u = units;
-
-		while (u) {
-			int i = ffs(u) - 1;
-			nvc0_fifo_isr_subfifo_intr(dev, i);
-			u &= ~(1 << i);
-		}
-
-		nv_wr32(dev, 0x0025a0, units);
-		stat &= ~0x20000000;
-	}
-
-	if (stat & 0x40000000) {
-		NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
-		nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
-		stat &= ~0x40000000;
-	}
-
-	if (stat) {
-		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
-		nv_wr32(dev, 0x002100, stat);
-		nv_wr32(dev, 0x002140, 0);
-	}
-}
-
-static void
-nvc0_fifo_destroy(struct drm_device *dev, int engine)
-{
-	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	nouveau_vm_put(&priv->user_vma);
-	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
-	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
-
-	dev_priv->eng[engine] = NULL;
-	kfree(priv);
-}
-
-int
-nvc0_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fifo_priv *priv;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.base.destroy = nvc0_fifo_destroy;
-	priv->base.base.init = nvc0_fifo_init;
-	priv->base.base.fini = nvc0_fifo_fini;
-	priv->base.base.context_new = nvc0_fifo_context_new;
-	priv->base.base.context_del = nvc0_fifo_context_del;
-	priv->base.channels = 128;
-	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
-	if (ret)
-		goto error;
-
-	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
-			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
-	if (ret)
-		goto error;
-
-	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
-error:
-	if (ret)
-		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
-	return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
deleted file mode 100644
index 59670acad7b9..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ /dev/null
@@ -1,897 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <linux/module.h>
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_fifo.h"
-
-#include "nvc0_graph.h"
-#include "nvc0_grhub.fuc.h"
-#include "nvc0_grgpc.fuc.h"
-
-static void
-nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
-{
-	NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
-		nv_rd32(dev, base + 0x400));
-	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
-		nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
-	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
-		nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
-}
-
-static void
-nvc0_graph_ctxctl_debug(struct drm_device *dev)
-{
-	u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
-	u32 gpc;
-
-	nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
-	for (gpc = 0; gpc < gpcnr; gpc++)
-		nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
-}
-
-static int
-nvc0_graph_load_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, 0x409840, 0x00000030);
-	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
-	nv_wr32(dev, 0x409504, 0x00000003);
-	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
-		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
-
-	return 0;
-}
-
-static int
-nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
-{
-	nv_wr32(dev, 0x409840, 0x00000003);
-	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
-	nv_wr32(dev, 0x409504, 0x00000009);
-	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
-		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nvc0_graph_construct_context(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
-	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	int ret, i;
-	u32 *ctx;
-
-	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-
-	if (!nouveau_ctxfw) {
-		nv_wr32(dev, 0x409840, 0x80000000);
-		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
-		nv_wr32(dev, 0x409504, 0x00000001);
-		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
-			NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
-			nvc0_graph_ctxctl_debug(dev);
-			ret = -EBUSY;
-			goto err;
-		}
-	} else {
-		nvc0_graph_load_context(chan);
-
-		nv_wo32(grch->grctx, 0x1c, 1);
-		nv_wo32(grch->grctx, 0x20, 0);
-		nv_wo32(grch->grctx, 0x28, 0);
-		nv_wo32(grch->grctx, 0x2c, 0);
-		dev_priv->engine.instmem.flush(dev);
-	}
-
-	ret = nvc0_grctx_generate(chan);
-	if (ret)
-		goto err;
-
-	if (!nouveau_ctxfw) {
-		nv_wr32(dev, 0x409840, 0x80000000);
-		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
-		nv_wr32(dev, 0x409504, 0x00000002);
-		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
-			NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
-			nvc0_graph_ctxctl_debug(dev);
-			ret = -EBUSY;
-			goto err;
-		}
-	} else {
-		ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
-		if (ret)
-			goto err;
-	}
-
-	for (i = 0; i < priv->grctx_size; i += 4)
-		ctx[i / 4] = nv_ro32(grch->grctx, i);
-
-	priv->grctx_vals = ctx;
-	return 0;
-
-err:
-	kfree(ctx);
-	return ret;
-}
-
-static int
-nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
-{
-	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
-	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i = 0, gpc, tp, ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
-				 &grch->unk408004);
-	if (ret)
-		return ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
-				 &grch->unk40800c);
-	if (ret)
-		return ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
-				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
-				 &grch->unk418810);
-	if (ret)
-		return ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
-				 &grch->mmio);
-	if (ret)
-		return ret;
-
-
-	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
-	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
-	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
-	nv_wo32(grch->mmio, i++ * 4, 0x80000018);
-
-	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
-	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
-	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
-	nv_wo32(grch->mmio, i++ * 4, 0x80000000);
-
-	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
-	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
-	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
-	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
-
-	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
-	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
-	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
-	nv_wo32(grch->mmio, i++ * 4, 0x00000000);
-
-	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
-	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
-	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
-	nv_wo32(grch->mmio, i++ * 4, 0x80000018);
-
-	if (dev_priv->chipset != 0xc1) {
-		u32 magic = 0x02180000;
-		nv_wo32(grch->mmio, i++ * 4, 0x00405830);
-		nv_wo32(grch->mmio, i++ * 4, magic);
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
-				u32 reg = TP_UNIT(gpc, tp, 0x520);
-				nv_wo32(grch->mmio, i++ * 4, reg);
-				nv_wo32(grch->mmio, i++ * 4, magic);
-				magic += 0x0324;
-			}
-		}
-	} else {
-		u32 magic = 0x02180000;
-		nv_wo32(grch->mmio, i++ * 4, 0x00405830);
-		nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
-		nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
-		nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
-				u32 reg = TP_UNIT(gpc, tp, 0x520);
-				nv_wo32(grch->mmio, i++ * 4, reg);
-				nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
-				magic += 0x0324;
-			}
-			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
-				u32 reg = TP_UNIT(gpc, tp, 0x544);
-				nv_wo32(grch->mmio, i++ * 4, reg);
-				nv_wo32(grch->mmio, i++ * 4, magic);
-				magic += 0x0324;
-			}
-		}
-	}
-
-	grch->mmio_nr = i / 2;
-	return 0;
-}
-
-static int
-nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nvc0_graph_priv *priv = nv_engine(dev, engine);
-	struct nvc0_graph_chan *grch;
-	struct nouveau_gpuobj *grctx;
-	int ret, i;
-
-	grch = kzalloc(sizeof(*grch), GFP_KERNEL);
-	if (!grch)
-		return -ENOMEM;
-	chan->engctx[NVOBJ_ENGINE_GR] = grch;
-
-	ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
-				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
-				 &grch->grctx);
-	if (ret)
-		goto error;
-	grctx = grch->grctx;
-
-	ret = nvc0_graph_create_context_mmio_list(chan);
-	if (ret)
-		goto error;
-
-	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
-	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
-	pinstmem->flush(dev);
-
-	if (!priv->grctx_vals) {
-		ret = nvc0_graph_construct_context(chan);
-		if (ret)
-			goto error;
-	}
-
-	for (i = 0; i < priv->grctx_size; i += 4)
-		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
-
-	if (!nouveau_ctxfw) {
-		nv_wo32(grctx, 0x00, grch->mmio_nr);
-		nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
-	} else {
-		nv_wo32(grctx, 0xf4, 0);
-		nv_wo32(grctx, 0xf8, 0);
-		nv_wo32(grctx, 0x10, grch->mmio_nr);
-		nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
-		nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
-		nv_wo32(grctx, 0x1c, 1);
-		nv_wo32(grctx, 0x20, 0);
-		nv_wo32(grctx, 0x28, 0);
-		nv_wo32(grctx, 0x2c, 0);
-	}
-	pinstmem->flush(dev);
-	return 0;
-
-error:
-	priv->base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nvc0_graph_chan *grch = chan->engctx[engine];
-
-	nouveau_gpuobj_ref(NULL, &grch->mmio);
-	nouveau_gpuobj_ref(NULL, &grch->unk418810);
-	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
-	nouveau_gpuobj_ref(NULL, &grch->unk408004);
-	nouveau_gpuobj_ref(NULL, &grch->grctx);
-	chan->engctx[engine] = NULL;
-}
-
-static int
-nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
-{
-	return 0;
-}
-
-static int
-nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static void
-nvc0_graph_init_obj418880(struct drm_device *dev)
-{
-	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	int i;
-
-	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
-	for (i = 0; i < 4; i++)
-		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
-	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
-	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
-}
-
-static void
-nvc0_graph_init_regs(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x400080, 0x003083c2);
-	nv_wr32(dev, 0x400088, 0x00006fe7);
-	nv_wr32(dev, 0x40008c, 0x00000000);
-	nv_wr32(dev, 0x400090, 0x00000030);
-	nv_wr32(dev, 0x40013c, 0x013901f7);
-	nv_wr32(dev, 0x400140, 0x00000100);
-	nv_wr32(dev, 0x400144, 0x00000000);
-	nv_wr32(dev, 0x400148, 0x00000110);
-	nv_wr32(dev, 0x400138, 0x00000000);
-	nv_wr32(dev, 0x400130, 0x00000000);
-	nv_wr32(dev, 0x400134, 0x00000000);
-	nv_wr32(dev, 0x400124, 0x00000002);
-}
-
-static void
-nvc0_graph_init_gpc_0(struct drm_device *dev)
-{
-	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
-	u32 data[TP_MAX / 8];
-	u8  tpnr[GPC_MAX];
-	int i, gpc, tpc;
-
-	nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
-
-	/*
-	 *      TP      ROP UNKVAL(magic_not_rop_nr)
-	 * 450: 4/0/0/0 2        3
-	 * 460: 3/4/0/0 4        1
-	 * 465: 3/4/4/0 4        7
-	 * 470: 3/3/4/4 5        5
-	 * 480: 3/4/4/4 6        6
-	 */
-
-	memset(data, 0x00, sizeof(data));
-	memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
-	for (i = 0, gpc = -1; i < priv->tp_total; i++) {
-		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
-		} while (!tpnr[gpc]);
-		tpc = priv->tp_nr[gpc] - tpnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
-						  priv->tp_nr[gpc]);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
-	nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
-}
-
-static void
-nvc0_graph_init_units(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x409c24, 0x000f0000);
-	nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
-	nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
-	nv_wr32(dev, 0x408030, 0xc0000000);
-	nv_wr32(dev, 0x40601c, 0xc0000000);
-	nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
-	nv_wr32(dev, 0x406018, 0xc0000000);
-	nv_wr32(dev, 0x405840, 0xc0000000);
-	nv_wr32(dev, 0x405844, 0x00ffffff);
-	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
-	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
-}
-
-static void
-nvc0_graph_init_gpc_1(struct drm_device *dev)
-{
-	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	int gpc, tp;
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
-		}
-		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
-	}
-}
-
-static void
-nvc0_graph_init_rop(struct drm_device *dev)
-{
-	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	int rop;
-
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
-	}
-}
-
-static void
-nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
-		    struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
-{
-	int i;
-
-	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
-	for (i = 0; i < data->size / 4; i++)
-		nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
-
-	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
-	for (i = 0; i < code->size / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
-		nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
-	}
-}
-
-static int
-nvc0_graph_init_ctxctl(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	u32 r000260;
-	int i;
-
-	if (!nouveau_ctxfw) {
-		/* load HUB microcode */
-		r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
-		nv_wr32(dev, 0x4091c0, 0x01000000);
-		for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
-			nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
-
-		nv_wr32(dev, 0x409180, 0x01000000);
-		for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
-			if ((i & 0x3f) == 0)
-				nv_wr32(dev, 0x409188, i >> 6);
-			nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
-		}
-
-		/* load GPC microcode */
-		nv_wr32(dev, 0x41a1c0, 0x01000000);
-		for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
-			nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
-
-		nv_wr32(dev, 0x41a180, 0x01000000);
-		for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
-			if ((i & 0x3f) == 0)
-				nv_wr32(dev, 0x41a188, i >> 6);
-			nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
-		}
-		nv_wr32(dev, 0x000260, r000260);
-
-		/* start HUB ucode running, it'll init the GPCs */
-		nv_wr32(dev, 0x409800, dev_priv->chipset);
-		nv_wr32(dev, 0x40910c, 0x00000000);
-		nv_wr32(dev, 0x409100, 0x00000002);
-		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
-			NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
-			nvc0_graph_ctxctl_debug(dev);
-			return -EBUSY;
-		}
-
-		priv->grctx_size = nv_rd32(dev, 0x409804);
-		return 0;
-	}
-
-	/* load fuc microcode */
-	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
-	nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
-	nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
-	nv_wr32(dev, 0x000260, r000260);
-
-	/* start both of them running */
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x41a10c, 0x00000000);
-	nv_wr32(dev, 0x40910c, 0x00000000);
-	nv_wr32(dev, 0x41a100, 0x00000002);
-	nv_wr32(dev, 0x409100, 0x00000002);
-	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
-		NV_INFO(dev, "0x409800 wait failed\n");
-
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x409500, 0x7fffffff);
-	nv_wr32(dev, 0x409504, 0x00000021);
-
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x409500, 0x00000000);
-	nv_wr32(dev, 0x409504, 0x00000010);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
-		return -EBUSY;
-	}
-	priv->grctx_size = nv_rd32(dev, 0x409800);
-
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x409500, 0x00000000);
-	nv_wr32(dev, 0x409504, 0x00000016);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
-		return -EBUSY;
-	}
-
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x409500, 0x00000000);
-	nv_wr32(dev, 0x409504, 0x00000025);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nvc0_graph_init(struct drm_device *dev, int engine)
-{
-	int ret;
-
-	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
-
-	nvc0_graph_init_obj418880(dev);
-	nvc0_graph_init_regs(dev);
-	/*nvc0_graph_init_unimplemented_magics(dev);*/
-	nvc0_graph_init_gpc_0(dev);
-	/*nvc0_graph_init_unimplemented_c242(dev);*/
-
-	nv_wr32(dev, 0x400500, 0x00010001);
-	nv_wr32(dev, 0x400100, 0xffffffff);
-	nv_wr32(dev, 0x40013c, 0xffffffff);
-
-	nvc0_graph_init_units(dev);
-	nvc0_graph_init_gpc_1(dev);
-	nvc0_graph_init_rop(dev);
-
-	nv_wr32(dev, 0x400108, 0xffffffff);
-	nv_wr32(dev, 0x400138, 0xffffffff);
-	nv_wr32(dev, 0x400118, 0xffffffff);
-	nv_wr32(dev, 0x400130, 0xffffffff);
-	nv_wr32(dev, 0x40011c, 0xffffffff);
-	nv_wr32(dev, 0x400134, 0xffffffff);
-	nv_wr32(dev, 0x400054, 0x34ce3464);
-
-	ret = nvc0_graph_init_ctxctl(dev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int
-nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < pfifo->channels; i++) {
-		chan = dev_priv->channels.ptr[i];
-		if (!chan || !chan->ramin)
-			continue;
-
-		if (inst == chan->ramin->vinst)
-			break;
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return i;
-}
-
-static void
-nvc0_graph_ctxctl_isr(struct drm_device *dev)
-{
-	u32 ustat = nv_rd32(dev, 0x409c18);
-
-	if (ustat & 0x00000001)
-		NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
-	if (ustat & 0x00080000)
-		NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
-	if (ustat & ~0x00080001)
-		NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
-
-	nvc0_graph_ctxctl_debug(dev);
-	nv_wr32(dev, 0x409c20, ustat);
-}
-
-static void
-nvc0_graph_isr(struct drm_device *dev)
-{
-	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
-	u32 chid = nvc0_graph_isr_chid(dev, inst);
-	u32 stat = nv_rd32(dev, 0x400100);
-	u32 addr = nv_rd32(dev, 0x400704);
-	u32 mthd = (addr & 0x00003ffc);
-	u32 subc = (addr & 0x00070000) >> 16;
-	u32 data = nv_rd32(dev, 0x400708);
-	u32 code = nv_rd32(dev, 0x400110);
-	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
-
-	if (stat & 0x00000010) {
-		if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
-			NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
-				     "subc %d class 0x%04x mthd 0x%04x "
-				     "data 0x%08x\n",
-				chid, inst, subc, class, mthd, data);
-		}
-		nv_wr32(dev, 0x400100, 0x00000010);
-		stat &= ~0x00000010;
-	}
-
-	if (stat & 0x00000020) {
-		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
-			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
-			chid, inst, subc, class, mthd, data);
-		nv_wr32(dev, 0x400100, 0x00000020);
-		stat &= ~0x00000020;
-	}
-
-	if (stat & 0x00100000) {
-		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
-		nouveau_enum_print(nv50_data_error_names, code);
-		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
-		       "mthd 0x%04x data 0x%08x\n",
-		       chid, inst, subc, class, mthd, data);
-		nv_wr32(dev, 0x400100, 0x00100000);
-		stat &= ~0x00100000;
-	}
-
-	if (stat & 0x00200000) {
-		u32 trap = nv_rd32(dev, 0x400108);
-		NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
-		nv_wr32(dev, 0x400108, trap);
-		nv_wr32(dev, 0x400100, 0x00200000);
-		stat &= ~0x00200000;
-	}
-
-	if (stat & 0x00080000) {
-		nvc0_graph_ctxctl_isr(dev);
-		nv_wr32(dev, 0x400100, 0x00080000);
-		stat &= ~0x00080000;
-	}
-
-	if (stat) {
-		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
-		nv_wr32(dev, 0x400100, stat);
-	}
-
-	nv_wr32(dev, 0x400500, 0x00010001);
-}
-
-static int
-nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
-		     struct nvc0_graph_fuc *fuc)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	const struct firmware *fw;
-	char f[32];
-	int ret;
-
-	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
-	ret = request_firmware(&fw, f, &dev->pdev->dev);
-	if (ret) {
-		snprintf(f, sizeof(f), "nouveau/%s", fwname);
-		ret = request_firmware(&fw, f, &dev->pdev->dev);
-		if (ret) {
-			NV_ERROR(dev, "failed to load %s\n", fwname);
-			return ret;
-		}
-	}
-
-	fuc->size = fw->size;
-	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-	release_firmware(fw);
-	return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-static void
-nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
-{
-	if (fuc->data) {
-		kfree(fuc->data);
-		fuc->data = NULL;
-	}
-}
-
-static void
-nvc0_graph_destroy(struct drm_device *dev, int engine)
-{
-	struct nvc0_graph_priv *priv = nv_engine(dev, engine);
-
-	if (nouveau_ctxfw) {
-		nvc0_graph_destroy_fw(&priv->fuc409c);
-		nvc0_graph_destroy_fw(&priv->fuc409d);
-		nvc0_graph_destroy_fw(&priv->fuc41ac);
-		nvc0_graph_destroy_fw(&priv->fuc41ad);
-	}
-
-	nouveau_irq_unregister(dev, 12);
-
-	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
-	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
-
-	if (priv->grctx_vals)
-		kfree(priv->grctx_vals);
-
-	NVOBJ_ENGINE_DEL(dev, GR);
-	kfree(priv);
-}
-
-int
-nvc0_graph_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_graph_priv *priv;
-	int ret, gpc, i;
-	u32 fermi;
-
-	fermi = nvc0_graph_class(dev);
-	if (!fermi) {
-		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
-		return 0;
-	}
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.destroy = nvc0_graph_destroy;
-	priv->base.init = nvc0_graph_init;
-	priv->base.fini = nvc0_graph_fini;
-	priv->base.context_new = nvc0_graph_context_new;
-	priv->base.context_del = nvc0_graph_context_del;
-	priv->base.object_new = nvc0_graph_object_new;
-
-	NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
-	nouveau_irq_register(dev, 12, nvc0_graph_isr);
-
-	if (nouveau_ctxfw) {
-		NV_INFO(dev, "PGRAPH: using external firmware\n");
-		if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
-		    nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
-		    nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
-		    nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
-			ret = 0;
-			goto error;
-		}
-	}
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
-	if (ret)
-		goto error;
-
-	for (i = 0; i < 0x1000; i += 4) {
-		nv_wo32(priv->unk4188b4, i, 0x00000010);
-		nv_wo32(priv->unk4188b8, i, 0x00000010);
-	}
-
-	priv->gpc_nr  =  nv_rd32(dev, 0x409604) & 0x0000001f;
-	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
-		priv->tp_total += priv->tp_nr[gpc];
-	}
-
-	/*XXX: these need figuring out... */
-	switch (dev_priv->chipset) {
-	case 0xc0:
-		if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
-			priv->magic_not_rop_nr = 0x07;
-		} else
-		if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
-			priv->magic_not_rop_nr = 0x05;
-		} else
-		if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
-			priv->magic_not_rop_nr = 0x06;
-		}
-		break;
-	case 0xc3: /* 450, 4/0/0/0, 2 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xc4: /* 460, 3/4/0/0, 4 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	case 0xc1: /* 2/0/0/0, 1 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	case 0xc8: /* 4/4/3/4, 5 */
-		priv->magic_not_rop_nr = 0x06;
-		break;
-	case 0xce: /* 4/4/0/0, 4 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xcf: /* 4/0/0/0, 3 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xd9: /* 1/0/0/0, 1 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	}
-
-	if (!priv->magic_not_rop_nr) {
-		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
-			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
-			 priv->tp_nr[3], priv->rop_nr);
-		priv->magic_not_rop_nr = 0x00;
-	}
-
-	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
-	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
-	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
-	if (fermi >= 0x9197)
-		NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
-	if (fermi >= 0x9297)
-		NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
-	NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
-	return 0;
-
-error:
-	nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
-	return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
deleted file mode 100644
index 91d44ea662d9..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NVC0_GRAPH_H__
-#define __NVC0_GRAPH_H__
-
-#define GPC_MAX 4
-#define TP_MAX 32
-
-#define ROP_BCAST(r)     (0x408800 + (r))
-#define ROP_UNIT(u, r)   (0x410000 + (u) * 0x400 + (r))
-#define GPC_BCAST(r)     (0x418000 + (r))
-#define GPC_UNIT(t, r)   (0x500000 + (t) * 0x8000 + (r))
-#define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
-
-struct nvc0_graph_fuc {
-	u32 *data;
-	u32  size;
-};
-
-struct nvc0_graph_priv {
-	struct nouveau_exec_engine base;
-
-	struct nvc0_graph_fuc fuc409c;
-	struct nvc0_graph_fuc fuc409d;
-	struct nvc0_graph_fuc fuc41ac;
-	struct nvc0_graph_fuc fuc41ad;
-
-	u8 gpc_nr;
-	u8 rop_nr;
-	u8 tp_nr[GPC_MAX];
-	u8 tp_total;
-
-	u32  grctx_size;
-	u32 *grctx_vals;
-	struct nouveau_gpuobj *unk4188b4;
-	struct nouveau_gpuobj *unk4188b8;
-
-	u8 magic_not_rop_nr;
-};
-
-struct nvc0_graph_chan {
-	struct nouveau_gpuobj *grctx;
-	struct nouveau_gpuobj *unk408004; /* 0x418810 too */
-	struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
-	struct nouveau_gpuobj *unk418810; /* 0x419848 too */
-	struct nouveau_gpuobj *mmio;
-	int mmio_nr;
-};
-
-int nvc0_grctx_generate(struct nouveau_channel *);
-
-/* nvc0_graph.c uses this also to determine supported chipsets */
-static inline u32
-nvc0_graph_class(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	switch (dev_priv->chipset) {
-	case 0xc0:
-	case 0xc3:
-	case 0xc4:
-	case 0xce: /* guess, mmio trace shows only 0x9097 state */
-	case 0xcf: /* guess, mmio trace shows only 0x9097 state */
-		return 0x9097;
-	case 0xc1:
-		return 0x9197;
-	case 0xc8:
-	case 0xd9:
-		return 0x9297;
-	default:
-		return 0;
-	}
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
deleted file mode 100644
index 2f17654e79a6..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ /dev/null
@@ -1,2878 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nvc0_graph.h"
-
-static void
-nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
-{
-	nv_wr32(dev, 0x400204, data);
-	nv_wr32(dev, 0x400200, icmd);
-	while (nv_rd32(dev, 0x400700) & 2) {}
-}
-
-static void
-nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
-{
-	nv_wr32(dev, 0x40448c, data);
-	nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
-}
-
-static void
-nvc0_grctx_generate_9097(struct drm_device *dev)
-{
-	u32 fermi = nvc0_graph_class(dev);
-	u32 mthd;
-
-	nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
-	nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
-	nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
-	nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
-	nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
-	nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
-	nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
-	nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
-	nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
-	nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
-	nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
-	nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
-	nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
-	nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
-	nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
-	nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
-	nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
-	nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
-	nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
-	nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
-	nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
-	if (fermi == 0x9097) {
-		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
-			nv_mthd(dev, 0x9097, mthd, 0x00000000);
-	}
-	nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
-	nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
-	nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
-	nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
-	nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
-	nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
-	nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
-	nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
-	nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
-	nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
-	nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
-	nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
-	nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
-	nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
-	nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
-	nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
-	nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
-	nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
-	nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
-	nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
-	nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
-	nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
-	nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
-	nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
-	nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
-	nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
-	nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
-	nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
-	nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
-	nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
-	nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
-	nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
-	nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
-	nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
-	nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
-	nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
-	nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
-	nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
-	nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
-	nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
-	nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
-	nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
-	nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
-	nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
-	nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
-	nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
-	nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
-	nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
-	nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
-	nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
-	nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
-	nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
-	nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
-	nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
-	nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
-	nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
-	nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
-	nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
-	nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
-	nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
-	nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
-	nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
-	nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
-	nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
-	nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
-	nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
-	nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
-	nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
-	nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
-	nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
-	nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
-	nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
-	nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
-	nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
-	nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
-	nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
-	nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
-	nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
-	nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
-	nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
-	nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
-	nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
-	nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
-	nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
-	/* in trace, right after 0x90c0, not here */
-	nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
-}
-
-static void
-nvc0_grctx_generate_9197(struct drm_device *dev)
-{
-	u32 fermi = nvc0_graph_class(dev);
-	u32 mthd;
-
-	if (fermi == 0x9197) {
-		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
-			nv_mthd(dev, 0x9197, mthd, 0x00000000);
-	}
-	nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
-}
-
-static void
-nvc0_grctx_generate_9297(struct drm_device *dev)
-{
-	u32 fermi = nvc0_graph_class(dev);
-	u32 mthd;
-
-	if (fermi == 0x9297) {
-		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
-			nv_mthd(dev, 0x9297, mthd, 0x00000000);
-	}
-	nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
-	nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
-	nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
-	nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
-	nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
-	nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
-}
-
-static void
-nvc0_grctx_generate_902d(struct drm_device *dev)
-{
-	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
-	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
-	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
-	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
-	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
-	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
-	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
-	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
-	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
-	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
-	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
-	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
-	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
-	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
-	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
-	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
-	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
-}
-
-static void
-nvc0_grctx_generate_9039(struct drm_device *dev)
-{
-	nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
-	nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
-	nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
-	nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
-	nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
-	nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
-	nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
-	nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_90c0(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i;
-
-	for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
-		nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
-		nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
-		nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
-		nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
-		nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
-		nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
-	}
-	nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
-	for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
-		nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
-		nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
-		nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
-		nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
-	}
-	nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
-	nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
-	nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
-	nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
-	nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_dispatch(struct drm_device *dev)
-{
-	int i;
-
-	nv_wr32(dev, 0x404004, 0x00000000);
-	nv_wr32(dev, 0x404008, 0x00000000);
-	nv_wr32(dev, 0x40400c, 0x00000000);
-	nv_wr32(dev, 0x404010, 0x00000000);
-	nv_wr32(dev, 0x404014, 0x00000000);
-	nv_wr32(dev, 0x404018, 0x00000000);
-	nv_wr32(dev, 0x40401c, 0x00000000);
-	nv_wr32(dev, 0x404020, 0x00000000);
-	nv_wr32(dev, 0x404024, 0x00000000);
-	nv_wr32(dev, 0x404028, 0x00000000);
-	nv_wr32(dev, 0x40402c, 0x00000000);
-	nv_wr32(dev, 0x404044, 0x00000000);
-	nv_wr32(dev, 0x404094, 0x00000000);
-	nv_wr32(dev, 0x404098, 0x00000000);
-	nv_wr32(dev, 0x40409c, 0x00000000);
-	nv_wr32(dev, 0x4040a0, 0x00000000);
-	nv_wr32(dev, 0x4040a4, 0x00000000);
-	nv_wr32(dev, 0x4040a8, 0x00000000);
-	nv_wr32(dev, 0x4040ac, 0x00000000);
-	nv_wr32(dev, 0x4040b0, 0x00000000);
-	nv_wr32(dev, 0x4040b4, 0x00000000);
-	nv_wr32(dev, 0x4040b8, 0x00000000);
-	nv_wr32(dev, 0x4040bc, 0x00000000);
-	nv_wr32(dev, 0x4040c0, 0x00000000);
-	nv_wr32(dev, 0x4040c4, 0x00000000);
-	nv_wr32(dev, 0x4040c8, 0xf0000087);
-	nv_wr32(dev, 0x4040d4, 0x00000000);
-	nv_wr32(dev, 0x4040d8, 0x00000000);
-	nv_wr32(dev, 0x4040dc, 0x00000000);
-	nv_wr32(dev, 0x4040e0, 0x00000000);
-	nv_wr32(dev, 0x4040e4, 0x00000000);
-	nv_wr32(dev, 0x4040e8, 0x00001000);
-	nv_wr32(dev, 0x4040f8, 0x00000000);
-	nv_wr32(dev, 0x404130, 0x00000000);
-	nv_wr32(dev, 0x404134, 0x00000000);
-	nv_wr32(dev, 0x404138, 0x20000040);
-	nv_wr32(dev, 0x404150, 0x0000002e);
-	nv_wr32(dev, 0x404154, 0x00000400);
-	nv_wr32(dev, 0x404158, 0x00000200);
-	nv_wr32(dev, 0x404164, 0x00000055);
-	nv_wr32(dev, 0x404168, 0x00000000);
-	nv_wr32(dev, 0x404174, 0x00000000);
-	nv_wr32(dev, 0x404178, 0x00000000);
-	nv_wr32(dev, 0x40417c, 0x00000000);
-	for (i = 0; i < 8; i++)
-		nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
-}
-
-static void
-nvc0_grctx_generate_macro(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x404404, 0x00000000);
-	nv_wr32(dev, 0x404408, 0x00000000);
-	nv_wr32(dev, 0x40440c, 0x00000000);
-	nv_wr32(dev, 0x404410, 0x00000000);
-	nv_wr32(dev, 0x404414, 0x00000000);
-	nv_wr32(dev, 0x404418, 0x00000000);
-	nv_wr32(dev, 0x40441c, 0x00000000);
-	nv_wr32(dev, 0x404420, 0x00000000);
-	nv_wr32(dev, 0x404424, 0x00000000);
-	nv_wr32(dev, 0x404428, 0x00000000);
-	nv_wr32(dev, 0x40442c, 0x00000000);
-	nv_wr32(dev, 0x404430, 0x00000000);
-	nv_wr32(dev, 0x404434, 0x00000000);
-	nv_wr32(dev, 0x404438, 0x00000000);
-	nv_wr32(dev, 0x404460, 0x00000000);
-	nv_wr32(dev, 0x404464, 0x00000000);
-	nv_wr32(dev, 0x404468, 0x00ffffff);
-	nv_wr32(dev, 0x40446c, 0x00000000);
-	nv_wr32(dev, 0x404480, 0x00000001);
-	nv_wr32(dev, 0x404498, 0x00000001);
-}
-
-static void
-nvc0_grctx_generate_m2mf(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x404604, 0x00000015);
-	nv_wr32(dev, 0x404608, 0x00000000);
-	nv_wr32(dev, 0x40460c, 0x00002e00);
-	nv_wr32(dev, 0x404610, 0x00000100);
-	nv_wr32(dev, 0x404618, 0x00000000);
-	nv_wr32(dev, 0x40461c, 0x00000000);
-	nv_wr32(dev, 0x404620, 0x00000000);
-	nv_wr32(dev, 0x404624, 0x00000000);
-	nv_wr32(dev, 0x404628, 0x00000000);
-	nv_wr32(dev, 0x40462c, 0x00000000);
-	nv_wr32(dev, 0x404630, 0x00000000);
-	nv_wr32(dev, 0x404634, 0x00000000);
-	nv_wr32(dev, 0x404638, 0x00000004);
-	nv_wr32(dev, 0x40463c, 0x00000000);
-	nv_wr32(dev, 0x404640, 0x00000000);
-	nv_wr32(dev, 0x404644, 0x00000000);
-	nv_wr32(dev, 0x404648, 0x00000000);
-	nv_wr32(dev, 0x40464c, 0x00000000);
-	nv_wr32(dev, 0x404650, 0x00000000);
-	nv_wr32(dev, 0x404654, 0x00000000);
-	nv_wr32(dev, 0x404658, 0x00000000);
-	nv_wr32(dev, 0x40465c, 0x007f0100);
-	nv_wr32(dev, 0x404660, 0x00000000);
-	nv_wr32(dev, 0x404664, 0x00000000);
-	nv_wr32(dev, 0x404668, 0x00000000);
-	nv_wr32(dev, 0x40466c, 0x00000000);
-	nv_wr32(dev, 0x404670, 0x00000000);
-	nv_wr32(dev, 0x404674, 0x00000000);
-	nv_wr32(dev, 0x404678, 0x00000000);
-	nv_wr32(dev, 0x40467c, 0x00000002);
-	nv_wr32(dev, 0x404680, 0x00000000);
-	nv_wr32(dev, 0x404684, 0x00000000);
-	nv_wr32(dev, 0x404688, 0x00000000);
-	nv_wr32(dev, 0x40468c, 0x00000000);
-	nv_wr32(dev, 0x404690, 0x00000000);
-	nv_wr32(dev, 0x404694, 0x00000000);
-	nv_wr32(dev, 0x404698, 0x00000000);
-	nv_wr32(dev, 0x40469c, 0x00000000);
-	nv_wr32(dev, 0x4046a0, 0x007f0080);
-	nv_wr32(dev, 0x4046a4, 0x00000000);
-	nv_wr32(dev, 0x4046a8, 0x00000000);
-	nv_wr32(dev, 0x4046ac, 0x00000000);
-	nv_wr32(dev, 0x4046b0, 0x00000000);
-	nv_wr32(dev, 0x4046b4, 0x00000000);
-	nv_wr32(dev, 0x4046b8, 0x00000000);
-	nv_wr32(dev, 0x4046bc, 0x00000000);
-	nv_wr32(dev, 0x4046c0, 0x00000000);
-	nv_wr32(dev, 0x4046c4, 0x00000000);
-	nv_wr32(dev, 0x4046c8, 0x00000000);
-	nv_wr32(dev, 0x4046cc, 0x00000000);
-	nv_wr32(dev, 0x4046d0, 0x00000000);
-	nv_wr32(dev, 0x4046d4, 0x00000000);
-	nv_wr32(dev, 0x4046d8, 0x00000000);
-	nv_wr32(dev, 0x4046dc, 0x00000000);
-	nv_wr32(dev, 0x4046e0, 0x00000000);
-	nv_wr32(dev, 0x4046e4, 0x00000000);
-	nv_wr32(dev, 0x4046e8, 0x00000000);
-	nv_wr32(dev, 0x4046f0, 0x00000000);
-	nv_wr32(dev, 0x4046f4, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_unk47xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x404700, 0x00000000);
-	nv_wr32(dev, 0x404704, 0x00000000);
-	nv_wr32(dev, 0x404708, 0x00000000);
-	nv_wr32(dev, 0x40470c, 0x00000000);
-	nv_wr32(dev, 0x404710, 0x00000000);
-	nv_wr32(dev, 0x404714, 0x00000000);
-	nv_wr32(dev, 0x404718, 0x00000000);
-	nv_wr32(dev, 0x40471c, 0x00000000);
-	nv_wr32(dev, 0x404720, 0x00000000);
-	nv_wr32(dev, 0x404724, 0x00000000);
-	nv_wr32(dev, 0x404728, 0x00000000);
-	nv_wr32(dev, 0x40472c, 0x00000000);
-	nv_wr32(dev, 0x404730, 0x00000000);
-	nv_wr32(dev, 0x404734, 0x00000100);
-	nv_wr32(dev, 0x404738, 0x00000000);
-	nv_wr32(dev, 0x40473c, 0x00000000);
-	nv_wr32(dev, 0x404740, 0x00000000);
-	nv_wr32(dev, 0x404744, 0x00000000);
-	nv_wr32(dev, 0x404748, 0x00000000);
-	nv_wr32(dev, 0x40474c, 0x00000000);
-	nv_wr32(dev, 0x404750, 0x00000000);
-	nv_wr32(dev, 0x404754, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_shaders(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->chipset == 0xd9) {
-		nv_wr32(dev, 0x405800, 0x0f8000bf);
-		nv_wr32(dev, 0x405830, 0x02180218);
-		nv_wr32(dev, 0x405834, 0x08000000);
-	} else
-	if (dev_priv->chipset == 0xc1) {
-		nv_wr32(dev, 0x405800, 0x0f8000bf);
-		nv_wr32(dev, 0x405830, 0x02180218);
-		nv_wr32(dev, 0x405834, 0x00000000);
-	} else {
-		nv_wr32(dev, 0x405800, 0x078000bf);
-		nv_wr32(dev, 0x405830, 0x02180000);
-		nv_wr32(dev, 0x405834, 0x00000000);
-	}
-	nv_wr32(dev, 0x405838, 0x00000000);
-	nv_wr32(dev, 0x405854, 0x00000000);
-	nv_wr32(dev, 0x405870, 0x00000001);
-	nv_wr32(dev, 0x405874, 0x00000001);
-	nv_wr32(dev, 0x405878, 0x00000001);
-	nv_wr32(dev, 0x40587c, 0x00000001);
-	nv_wr32(dev, 0x405a00, 0x00000000);
-	nv_wr32(dev, 0x405a04, 0x00000000);
-	nv_wr32(dev, 0x405a18, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_unk60xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x406020, 0x000103c1);
-	nv_wr32(dev, 0x406028, 0x00000001);
-	nv_wr32(dev, 0x40602c, 0x00000001);
-	nv_wr32(dev, 0x406030, 0x00000001);
-	nv_wr32(dev, 0x406034, 0x00000001);
-}
-
-static void
-nvc0_grctx_generate_unk64xx(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	nv_wr32(dev, 0x4064a8, 0x00000000);
-	nv_wr32(dev, 0x4064ac, 0x00003fff);
-	nv_wr32(dev, 0x4064b4, 0x00000000);
-	nv_wr32(dev, 0x4064b8, 0x00000000);
-	if (dev_priv->chipset == 0xd9)
-		nv_wr32(dev, 0x4064bc, 0x00000000);
-	if (dev_priv->chipset == 0xc1 ||
-	    dev_priv->chipset == 0xd9) {
-		nv_wr32(dev, 0x4064c0, 0x80140078);
-		nv_wr32(dev, 0x4064c4, 0x0086ffff);
-	}
-}
-
-static void
-nvc0_grctx_generate_tpbus(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x407804, 0x00000023);
-	nv_wr32(dev, 0x40780c, 0x0a418820);
-	nv_wr32(dev, 0x407810, 0x062080e6);
-	nv_wr32(dev, 0x407814, 0x020398a4);
-	nv_wr32(dev, 0x407818, 0x0e629062);
-	nv_wr32(dev, 0x40781c, 0x0a418820);
-	nv_wr32(dev, 0x407820, 0x000000e6);
-	nv_wr32(dev, 0x4078bc, 0x00000103);
-}
-
-static void
-nvc0_grctx_generate_ccache(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x408000, 0x00000000);
-	nv_wr32(dev, 0x408004, 0x00000000);
-	nv_wr32(dev, 0x408008, 0x00000018);
-	nv_wr32(dev, 0x40800c, 0x00000000);
-	nv_wr32(dev, 0x408010, 0x00000000);
-	nv_wr32(dev, 0x408014, 0x00000069);
-	nv_wr32(dev, 0x408018, 0xe100e100);
-	nv_wr32(dev, 0x408064, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_rop(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chipset = dev_priv->chipset;
-
-	/* ROPC_BROADCAST */
-	nv_wr32(dev, 0x408800, 0x02802a3c);
-	nv_wr32(dev, 0x408804, 0x00000040);
-	if (chipset == 0xd9) {
-		nv_wr32(dev, 0x408808, 0x1043e005);
-		nv_wr32(dev, 0x408900, 0x3080b801);
-		nv_wr32(dev, 0x408904, 0x1043e005);
-		nv_wr32(dev, 0x408908, 0x00c8102f);
-	} else
-	if (chipset == 0xc1) {
-		nv_wr32(dev, 0x408808, 0x1003e005);
-		nv_wr32(dev, 0x408900, 0x3080b801);
-		nv_wr32(dev, 0x408904, 0x62000001);
-		nv_wr32(dev, 0x408908, 0x00c80929);
-	} else {
-		nv_wr32(dev, 0x408808, 0x0003e00d);
-		nv_wr32(dev, 0x408900, 0x3080b801);
-		nv_wr32(dev, 0x408904, 0x02000001);
-		nv_wr32(dev, 0x408908, 0x00c80929);
-	}
-	nv_wr32(dev, 0x40890c, 0x00000000);
-	nv_wr32(dev, 0x408980, 0x0000011d);
-}
-
-static void
-nvc0_grctx_generate_gpc(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chipset = dev_priv->chipset;
-	int i;
-
-	/* GPC_BROADCAST */
-	nv_wr32(dev, 0x418380, 0x00000016);
-	nv_wr32(dev, 0x418400, 0x38004e00);
-	nv_wr32(dev, 0x418404, 0x71e0ffff);
-	nv_wr32(dev, 0x418408, 0x00000000);
-	nv_wr32(dev, 0x41840c, 0x00001008);
-	nv_wr32(dev, 0x418410, 0x0fff0fff);
-	nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
-	nv_wr32(dev, 0x418450, 0x00000000);
-	nv_wr32(dev, 0x418454, 0x00000000);
-	nv_wr32(dev, 0x418458, 0x00000000);
-	nv_wr32(dev, 0x41845c, 0x00000000);
-	nv_wr32(dev, 0x418460, 0x00000000);
-	nv_wr32(dev, 0x418464, 0x00000000);
-	nv_wr32(dev, 0x418468, 0x00000001);
-	nv_wr32(dev, 0x41846c, 0x00000000);
-	nv_wr32(dev, 0x418470, 0x00000000);
-	nv_wr32(dev, 0x418600, 0x0000001f);
-	nv_wr32(dev, 0x418684, 0x0000000f);
-	nv_wr32(dev, 0x418700, 0x00000002);
-	nv_wr32(dev, 0x418704, 0x00000080);
-	nv_wr32(dev, 0x418708, 0x00000000);
-	nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
-	nv_wr32(dev, 0x418710, 0x00000000);
-	nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
-	nv_wr32(dev, 0x418808, 0x00000000);
-	nv_wr32(dev, 0x41880c, 0x00000000);
-	nv_wr32(dev, 0x418810, 0x00000000);
-	nv_wr32(dev, 0x418828, 0x00008442);
-	if (chipset == 0xc1 || chipset == 0xd9)
-		nv_wr32(dev, 0x418830, 0x10000001);
-	else
-		nv_wr32(dev, 0x418830, 0x00000001);
-	nv_wr32(dev, 0x4188d8, 0x00000008);
-	nv_wr32(dev, 0x4188e0, 0x01000000);
-	nv_wr32(dev, 0x4188e8, 0x00000000);
-	nv_wr32(dev, 0x4188ec, 0x00000000);
-	nv_wr32(dev, 0x4188f0, 0x00000000);
-	nv_wr32(dev, 0x4188f4, 0x00000000);
-	nv_wr32(dev, 0x4188f8, 0x00000000);
-	if (chipset == 0xd9)
-		nv_wr32(dev, 0x4188fc, 0x20100008);
-	else if (chipset == 0xc1)
-		nv_wr32(dev, 0x4188fc, 0x00100018);
-	else
-		nv_wr32(dev, 0x4188fc, 0x00100000);
-	nv_wr32(dev, 0x41891c, 0x00ff00ff);
-	nv_wr32(dev, 0x418924, 0x00000000);
-	nv_wr32(dev, 0x418928, 0x00ffff00);
-	nv_wr32(dev, 0x41892c, 0x0000ff00);
-	for (i = 0; i < 8; i++) {
-		nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
-		nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
-		nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
-		nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
-		nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
-		nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
-		nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
-	}
-	nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
-	nv_wr32(dev, 0x418b08, 0x0a418820);
-	nv_wr32(dev, 0x418b0c, 0x062080e6);
-	nv_wr32(dev, 0x418b10, 0x020398a4);
-	nv_wr32(dev, 0x418b14, 0x0e629062);
-	nv_wr32(dev, 0x418b18, 0x0a418820);
-	nv_wr32(dev, 0x418b1c, 0x000000e6);
-	nv_wr32(dev, 0x418bb8, 0x00000103);
-	nv_wr32(dev, 0x418c08, 0x00000001);
-	nv_wr32(dev, 0x418c10, 0x00000000);
-	nv_wr32(dev, 0x418c14, 0x00000000);
-	nv_wr32(dev, 0x418c18, 0x00000000);
-	nv_wr32(dev, 0x418c1c, 0x00000000);
-	nv_wr32(dev, 0x418c20, 0x00000000);
-	nv_wr32(dev, 0x418c24, 0x00000000);
-	nv_wr32(dev, 0x418c28, 0x00000000);
-	nv_wr32(dev, 0x418c2c, 0x00000000);
-	if (chipset == 0xc1 || chipset == 0xd9)
-		nv_wr32(dev, 0x418c6c, 0x00000001);
-	nv_wr32(dev, 0x418c80, 0x20200004);
-	nv_wr32(dev, 0x418c8c, 0x00000001);
-	nv_wr32(dev, 0x419000, 0x00000780);
-	nv_wr32(dev, 0x419004, 0x00000000);
-	nv_wr32(dev, 0x419008, 0x00000000);
-	nv_wr32(dev, 0x419014, 0x00000004);
-}
-
-static void
-nvc0_grctx_generate_tp(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chipset = dev_priv->chipset;
-
-	/* GPC_BROADCAST.TP_BROADCAST */
-	nv_wr32(dev, 0x419818, 0x00000000);
-	nv_wr32(dev, 0x41983c, 0x00038bc7);
-	nv_wr32(dev, 0x419848, 0x00000000);
-	if (chipset == 0xc1 || chipset == 0xd9)
-		nv_wr32(dev, 0x419864, 0x00000129);
-	else
-		nv_wr32(dev, 0x419864, 0x0000012a);
-	nv_wr32(dev, 0x419888, 0x00000000);
-	nv_wr32(dev, 0x419a00, 0x000001f0);
-	nv_wr32(dev, 0x419a04, 0x00000001);
-	nv_wr32(dev, 0x419a08, 0x00000023);
-	nv_wr32(dev, 0x419a0c, 0x00020000);
-	nv_wr32(dev, 0x419a10, 0x00000000);
-	nv_wr32(dev, 0x419a14, 0x00000200);
-	nv_wr32(dev, 0x419a1c, 0x00000000);
-	nv_wr32(dev, 0x419a20, 0x00000800);
-	if (chipset == 0xd9)
-		nv_wr32(dev, 0x00419ac4, 0x0017f440);
-	else if (chipset != 0xc0 && chipset != 0xc8)
-		nv_wr32(dev, 0x00419ac4, 0x0007f440);
-	nv_wr32(dev, 0x419b00, 0x0a418820);
-	nv_wr32(dev, 0x419b04, 0x062080e6);
-	nv_wr32(dev, 0x419b08, 0x020398a4);
-	nv_wr32(dev, 0x419b0c, 0x0e629062);
-	nv_wr32(dev, 0x419b10, 0x0a418820);
-	nv_wr32(dev, 0x419b14, 0x000000e6);
-	nv_wr32(dev, 0x419bd0, 0x00900103);
-	if (chipset == 0xc1 || chipset == 0xd9)
-		nv_wr32(dev, 0x419be0, 0x00400001);
-	else
-		nv_wr32(dev, 0x419be0, 0x00000001);
-	nv_wr32(dev, 0x419be4, 0x00000000);
-	nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
-	nv_wr32(dev, 0x419c04, 0x00000006);
-	nv_wr32(dev, 0x419c08, 0x00000002);
-	nv_wr32(dev, 0x419c20, 0x00000000);
-	if (dev_priv->chipset == 0xd9) {
-		nv_wr32(dev, 0x419c24, 0x00084210);
-		nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
-		nv_wr32(dev, 0x419cb0, 0x00020048);
-	} else
-	if (chipset == 0xce || chipset == 0xcf) {
-		nv_wr32(dev, 0x419cb0, 0x00020048);
-	} else {
-		nv_wr32(dev, 0x419cb0, 0x00060048);
-	}
-	nv_wr32(dev, 0x419ce8, 0x00000000);
-	nv_wr32(dev, 0x419cf4, 0x00000183);
-	if (chipset == 0xc1 || chipset == 0xd9)
-		nv_wr32(dev, 0x419d20, 0x12180000);
-	else
-		nv_wr32(dev, 0x419d20, 0x02180000);
-	nv_wr32(dev, 0x419d24, 0x00001fff);
-	if (chipset == 0xc1 || chipset == 0xd9)
-		nv_wr32(dev, 0x419d44, 0x02180218);
-	nv_wr32(dev, 0x419e04, 0x00000000);
-	nv_wr32(dev, 0x419e08, 0x00000000);
-	nv_wr32(dev, 0x419e0c, 0x00000000);
-	nv_wr32(dev, 0x419e10, 0x00000002);
-	nv_wr32(dev, 0x419e44, 0x001beff2);
-	nv_wr32(dev, 0x419e48, 0x00000000);
-	nv_wr32(dev, 0x419e4c, 0x0000000f);
-	nv_wr32(dev, 0x419e50, 0x00000000);
-	nv_wr32(dev, 0x419e54, 0x00000000);
-	nv_wr32(dev, 0x419e58, 0x00000000);
-	nv_wr32(dev, 0x419e5c, 0x00000000);
-	nv_wr32(dev, 0x419e60, 0x00000000);
-	nv_wr32(dev, 0x419e64, 0x00000000);
-	nv_wr32(dev, 0x419e68, 0x00000000);
-	nv_wr32(dev, 0x419e6c, 0x00000000);
-	nv_wr32(dev, 0x419e70, 0x00000000);
-	nv_wr32(dev, 0x419e74, 0x00000000);
-	nv_wr32(dev, 0x419e78, 0x00000000);
-	nv_wr32(dev, 0x419e7c, 0x00000000);
-	nv_wr32(dev, 0x419e80, 0x00000000);
-	nv_wr32(dev, 0x419e84, 0x00000000);
-	nv_wr32(dev, 0x419e88, 0x00000000);
-	nv_wr32(dev, 0x419e8c, 0x00000000);
-	nv_wr32(dev, 0x419e90, 0x00000000);
-	nv_wr32(dev, 0x419e98, 0x00000000);
-	if (chipset != 0xc0 && chipset != 0xc8)
-		nv_wr32(dev, 0x419ee0, 0x00011110);
-	nv_wr32(dev, 0x419f50, 0x00000000);
-	nv_wr32(dev, 0x419f54, 0x00000000);
-	if (chipset != 0xc0 && chipset != 0xc8)
-		nv_wr32(dev, 0x419f58, 0x00000000);
-}
-
-int
-nvc0_grctx_generate(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
-	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	int i, gpc, tp, id;
-	u32 fermi = nvc0_graph_class(dev);
-	u32 r000260, tmp;
-
-	r000260 = nv_rd32(dev, 0x000260);
-	nv_wr32(dev, 0x000260, r000260 & ~1);
-	nv_wr32(dev, 0x400208, 0x00000000);
-
-	nvc0_grctx_generate_dispatch(dev);
-	nvc0_grctx_generate_macro(dev);
-	nvc0_grctx_generate_m2mf(dev);
-	nvc0_grctx_generate_unk47xx(dev);
-	nvc0_grctx_generate_shaders(dev);
-	nvc0_grctx_generate_unk60xx(dev);
-	nvc0_grctx_generate_unk64xx(dev);
-	nvc0_grctx_generate_tpbus(dev);
-	nvc0_grctx_generate_ccache(dev);
-	nvc0_grctx_generate_rop(dev);
-	nvc0_grctx_generate_gpc(dev);
-	nvc0_grctx_generate_tp(dev);
-
-	nv_wr32(dev, 0x404154, 0x00000000);
-
-	/* fuc "mmio list" writes */
-	for (i = 0; i < grch->mmio_nr * 8; i += 8) {
-		u32 reg = nv_ro32(grch->mmio, i + 0);
-		nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
-	}
-
-	for (tp = 0, id = 0; tp < 4; tp++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tp < priv->tp_nr[gpc]) {
-				nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
-				nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
-				nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
-				nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
-				id++;
-			}
-
-			nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
-			nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
-		}
-	}
-
-	tmp = 0;
-	for (i = 0; i < priv->gpc_nr; i++)
-		tmp |= priv->tp_nr[i] << (i * 4);
-	nv_wr32(dev, 0x406028, tmp);
-	nv_wr32(dev, 0x405870, tmp);
-
-	nv_wr32(dev, 0x40602c, 0x00000000);
-	nv_wr32(dev, 0x405874, 0x00000000);
-	nv_wr32(dev, 0x406030, 0x00000000);
-	nv_wr32(dev, 0x405878, 0x00000000);
-	nv_wr32(dev, 0x406034, 0x00000000);
-	nv_wr32(dev, 0x40587c, 0x00000000);
-
-	if (1) {
-		u8 tpnr[GPC_MAX], data[TP_MAX];
-
-		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
-		memset(data, 0x1f, sizeof(data));
-
-		gpc = -1;
-		for (tp = 0; tp < priv->tp_total; tp++) {
-			do {
-				gpc = (gpc + 1) % priv->gpc_nr;
-			} while (!tpnr[gpc]);
-			tpnr[gpc]--;
-			data[tp] = gpc;
-		}
-
-		for (i = 0; i < 4; i++)
-			nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
-	}
-
-	if (1) {
-		u32 data[6] = {}, data2[2] = {};
-		u8 tpnr[GPC_MAX];
-		u8 shift, ntpcv;
-
-		/* calculate first set of magics */
-		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
-
-		gpc = -1;
-		for (tp = 0; tp < priv->tp_total; tp++) {
-			do {
-				gpc = (gpc + 1) % priv->gpc_nr;
-			} while (!tpnr[gpc]);
-			tpnr[gpc]--;
-
-			data[tp / 6] |= gpc << ((tp % 6) * 5);
-		}
-
-		for (; tp < 32; tp++)
-			data[tp / 6] |= 7 << ((tp % 6) * 5);
-
-		/* and the second... */
-		shift = 0;
-		ntpcv = priv->tp_total;
-		while (!(ntpcv & (1 << 4))) {
-			ntpcv <<= 1;
-			shift++;
-		}
-
-		data2[0]  = (ntpcv << 16);
-		data2[0] |= (shift << 21);
-		data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
-		for (i = 1; i < 7; i++)
-			data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
-
-		/* GPC_BROADCAST */
-		nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
-					priv->magic_not_rop_nr);
-		for (i = 0; i < 6; i++)
-			nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
-
-		/* GPC_BROADCAST.TP_BROADCAST */
-		nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
-				       priv->magic_not_rop_nr |
-				       data2[0]);
-		nv_wr32(dev, 0x419be4, data2[1]);
-		for (i = 0; i < 6; i++)
-			nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
-
-		/* UNK78xx */
-		nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
-					priv->magic_not_rop_nr);
-		for (i = 0; i < 6; i++)
-			nv_wr32(dev, 0x40780c + (i * 4), data[i]);
-	}
-
-	if (1) {
-		u32 tp_mask = 0, tp_set = 0;
-		u8  tpnr[GPC_MAX], a, b;
-
-		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++)
-			tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
-
-		for (i = 0, gpc = -1, b = -1; i < 32; i++) {
-			a = (i * (priv->tp_total - 1)) / 32;
-			if (a != b) {
-				b = a;
-				do {
-					gpc = (gpc + 1) % priv->gpc_nr;
-				} while (!tpnr[gpc]);
-				tp = priv->tp_nr[gpc] - tpnr[gpc]--;
-
-				tp_set |= 1 << ((gpc * 8) + tp);
-			}
-
-			nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
-			nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
-		}
-	}
-
-	nv_wr32(dev, 0x400208, 0x80000000);
-
-	nv_icmd(dev, 0x00001000, 0x00000004);
-	nv_icmd(dev, 0x000000a9, 0x0000ffff);
-	nv_icmd(dev, 0x00000038, 0x0fac6881);
-	nv_icmd(dev, 0x0000003d, 0x00000001);
-	nv_icmd(dev, 0x000000e8, 0x00000400);
-	nv_icmd(dev, 0x000000e9, 0x00000400);
-	nv_icmd(dev, 0x000000ea, 0x00000400);
-	nv_icmd(dev, 0x000000eb, 0x00000400);
-	nv_icmd(dev, 0x000000ec, 0x00000400);
-	nv_icmd(dev, 0x000000ed, 0x00000400);
-	nv_icmd(dev, 0x000000ee, 0x00000400);
-	nv_icmd(dev, 0x000000ef, 0x00000400);
-	nv_icmd(dev, 0x00000078, 0x00000300);
-	nv_icmd(dev, 0x00000079, 0x00000300);
-	nv_icmd(dev, 0x0000007a, 0x00000300);
-	nv_icmd(dev, 0x0000007b, 0x00000300);
-	nv_icmd(dev, 0x0000007c, 0x00000300);
-	nv_icmd(dev, 0x0000007d, 0x00000300);
-	nv_icmd(dev, 0x0000007e, 0x00000300);
-	nv_icmd(dev, 0x0000007f, 0x00000300);
-	nv_icmd(dev, 0x00000050, 0x00000011);
-	nv_icmd(dev, 0x00000058, 0x00000008);
-	nv_icmd(dev, 0x00000059, 0x00000008);
-	nv_icmd(dev, 0x0000005a, 0x00000008);
-	nv_icmd(dev, 0x0000005b, 0x00000008);
-	nv_icmd(dev, 0x0000005c, 0x00000008);
-	nv_icmd(dev, 0x0000005d, 0x00000008);
-	nv_icmd(dev, 0x0000005e, 0x00000008);
-	nv_icmd(dev, 0x0000005f, 0x00000008);
-	nv_icmd(dev, 0x00000208, 0x00000001);
-	nv_icmd(dev, 0x00000209, 0x00000001);
-	nv_icmd(dev, 0x0000020a, 0x00000001);
-	nv_icmd(dev, 0x0000020b, 0x00000001);
-	nv_icmd(dev, 0x0000020c, 0x00000001);
-	nv_icmd(dev, 0x0000020d, 0x00000001);
-	nv_icmd(dev, 0x0000020e, 0x00000001);
-	nv_icmd(dev, 0x0000020f, 0x00000001);
-	nv_icmd(dev, 0x00000081, 0x00000001);
-	nv_icmd(dev, 0x00000085, 0x00000004);
-	nv_icmd(dev, 0x00000088, 0x00000400);
-	nv_icmd(dev, 0x00000090, 0x00000300);
-	nv_icmd(dev, 0x00000098, 0x00001001);
-	nv_icmd(dev, 0x000000e3, 0x00000001);
-	nv_icmd(dev, 0x000000da, 0x00000001);
-	nv_icmd(dev, 0x000000f8, 0x00000003);
-	nv_icmd(dev, 0x000000fa, 0x00000001);
-	nv_icmd(dev, 0x0000009f, 0x0000ffff);
-	nv_icmd(dev, 0x000000a0, 0x0000ffff);
-	nv_icmd(dev, 0x000000a1, 0x0000ffff);
-	nv_icmd(dev, 0x000000a2, 0x0000ffff);
-	nv_icmd(dev, 0x000000b1, 0x00000001);
-	nv_icmd(dev, 0x000000b2, 0x00000000);
-	nv_icmd(dev, 0x000000b3, 0x00000000);
-	nv_icmd(dev, 0x000000b4, 0x00000000);
-	nv_icmd(dev, 0x000000b5, 0x00000000);
-	nv_icmd(dev, 0x000000b6, 0x00000000);
-	nv_icmd(dev, 0x000000b7, 0x00000000);
-	nv_icmd(dev, 0x000000b8, 0x00000000);
-	nv_icmd(dev, 0x000000b9, 0x00000000);
-	nv_icmd(dev, 0x000000ba, 0x00000000);
-	nv_icmd(dev, 0x000000bb, 0x00000000);
-	nv_icmd(dev, 0x000000bc, 0x00000000);
-	nv_icmd(dev, 0x000000bd, 0x00000000);
-	nv_icmd(dev, 0x000000be, 0x00000000);
-	nv_icmd(dev, 0x000000bf, 0x00000000);
-	nv_icmd(dev, 0x000000c0, 0x00000000);
-	nv_icmd(dev, 0x000000c1, 0x00000000);
-	nv_icmd(dev, 0x000000c2, 0x00000000);
-	nv_icmd(dev, 0x000000c3, 0x00000000);
-	nv_icmd(dev, 0x000000c4, 0x00000000);
-	nv_icmd(dev, 0x000000c5, 0x00000000);
-	nv_icmd(dev, 0x000000c6, 0x00000000);
-	nv_icmd(dev, 0x000000c7, 0x00000000);
-	nv_icmd(dev, 0x000000c8, 0x00000000);
-	nv_icmd(dev, 0x000000c9, 0x00000000);
-	nv_icmd(dev, 0x000000ca, 0x00000000);
-	nv_icmd(dev, 0x000000cb, 0x00000000);
-	nv_icmd(dev, 0x000000cc, 0x00000000);
-	nv_icmd(dev, 0x000000cd, 0x00000000);
-	nv_icmd(dev, 0x000000ce, 0x00000000);
-	nv_icmd(dev, 0x000000cf, 0x00000000);
-	nv_icmd(dev, 0x000000d0, 0x00000000);
-	nv_icmd(dev, 0x000000d1, 0x00000000);
-	nv_icmd(dev, 0x000000d2, 0x00000000);
-	nv_icmd(dev, 0x000000d3, 0x00000000);
-	nv_icmd(dev, 0x000000d4, 0x00000000);
-	nv_icmd(dev, 0x000000d5, 0x00000000);
-	nv_icmd(dev, 0x000000d6, 0x00000000);
-	nv_icmd(dev, 0x000000d7, 0x00000000);
-	nv_icmd(dev, 0x000000d8, 0x00000000);
-	nv_icmd(dev, 0x000000d9, 0x00000000);
-	nv_icmd(dev, 0x00000210, 0x00000040);
-	nv_icmd(dev, 0x00000211, 0x00000040);
-	nv_icmd(dev, 0x00000212, 0x00000040);
-	nv_icmd(dev, 0x00000213, 0x00000040);
-	nv_icmd(dev, 0x00000214, 0x00000040);
-	nv_icmd(dev, 0x00000215, 0x00000040);
-	nv_icmd(dev, 0x00000216, 0x00000040);
-	nv_icmd(dev, 0x00000217, 0x00000040);
-	if (dev_priv->chipset == 0xd9) {
-		for (i = 0x0400; i <= 0x0417; i++)
-			nv_icmd(dev, i, 0x00000040);
-	}
-	nv_icmd(dev, 0x00000218, 0x0000c080);
-	nv_icmd(dev, 0x00000219, 0x0000c080);
-	nv_icmd(dev, 0x0000021a, 0x0000c080);
-	nv_icmd(dev, 0x0000021b, 0x0000c080);
-	nv_icmd(dev, 0x0000021c, 0x0000c080);
-	nv_icmd(dev, 0x0000021d, 0x0000c080);
-	nv_icmd(dev, 0x0000021e, 0x0000c080);
-	nv_icmd(dev, 0x0000021f, 0x0000c080);
-	if (dev_priv->chipset == 0xd9) {
-		for (i = 0x0440; i <= 0x0457; i++)
-			nv_icmd(dev, i, 0x0000c080);
-	}
-	nv_icmd(dev, 0x000000ad, 0x0000013e);
-	nv_icmd(dev, 0x000000e1, 0x00000010);
-	nv_icmd(dev, 0x00000290, 0x00000000);
-	nv_icmd(dev, 0x00000291, 0x00000000);
-	nv_icmd(dev, 0x00000292, 0x00000000);
-	nv_icmd(dev, 0x00000293, 0x00000000);
-	nv_icmd(dev, 0x00000294, 0x00000000);
-	nv_icmd(dev, 0x00000295, 0x00000000);
-	nv_icmd(dev, 0x00000296, 0x00000000);
-	nv_icmd(dev, 0x00000297, 0x00000000);
-	nv_icmd(dev, 0x00000298, 0x00000000);
-	nv_icmd(dev, 0x00000299, 0x00000000);
-	nv_icmd(dev, 0x0000029a, 0x00000000);
-	nv_icmd(dev, 0x0000029b, 0x00000000);
-	nv_icmd(dev, 0x0000029c, 0x00000000);
-	nv_icmd(dev, 0x0000029d, 0x00000000);
-	nv_icmd(dev, 0x0000029e, 0x00000000);
-	nv_icmd(dev, 0x0000029f, 0x00000000);
-	nv_icmd(dev, 0x000003b0, 0x00000000);
-	nv_icmd(dev, 0x000003b1, 0x00000000);
-	nv_icmd(dev, 0x000003b2, 0x00000000);
-	nv_icmd(dev, 0x000003b3, 0x00000000);
-	nv_icmd(dev, 0x000003b4, 0x00000000);
-	nv_icmd(dev, 0x000003b5, 0x00000000);
-	nv_icmd(dev, 0x000003b6, 0x00000000);
-	nv_icmd(dev, 0x000003b7, 0x00000000);
-	nv_icmd(dev, 0x000003b8, 0x00000000);
-	nv_icmd(dev, 0x000003b9, 0x00000000);
-	nv_icmd(dev, 0x000003ba, 0x00000000);
-	nv_icmd(dev, 0x000003bb, 0x00000000);
-	nv_icmd(dev, 0x000003bc, 0x00000000);
-	nv_icmd(dev, 0x000003bd, 0x00000000);
-	nv_icmd(dev, 0x000003be, 0x00000000);
-	nv_icmd(dev, 0x000003bf, 0x00000000);
-	nv_icmd(dev, 0x000002a0, 0x00000000);
-	nv_icmd(dev, 0x000002a1, 0x00000000);
-	nv_icmd(dev, 0x000002a2, 0x00000000);
-	nv_icmd(dev, 0x000002a3, 0x00000000);
-	nv_icmd(dev, 0x000002a4, 0x00000000);
-	nv_icmd(dev, 0x000002a5, 0x00000000);
-	nv_icmd(dev, 0x000002a6, 0x00000000);
-	nv_icmd(dev, 0x000002a7, 0x00000000);
-	nv_icmd(dev, 0x000002a8, 0x00000000);
-	nv_icmd(dev, 0x000002a9, 0x00000000);
-	nv_icmd(dev, 0x000002aa, 0x00000000);
-	nv_icmd(dev, 0x000002ab, 0x00000000);
-	nv_icmd(dev, 0x000002ac, 0x00000000);
-	nv_icmd(dev, 0x000002ad, 0x00000000);
-	nv_icmd(dev, 0x000002ae, 0x00000000);
-	nv_icmd(dev, 0x000002af, 0x00000000);
-	nv_icmd(dev, 0x00000420, 0x00000000);
-	nv_icmd(dev, 0x00000421, 0x00000000);
-	nv_icmd(dev, 0x00000422, 0x00000000);
-	nv_icmd(dev, 0x00000423, 0x00000000);
-	nv_icmd(dev, 0x00000424, 0x00000000);
-	nv_icmd(dev, 0x00000425, 0x00000000);
-	nv_icmd(dev, 0x00000426, 0x00000000);
-	nv_icmd(dev, 0x00000427, 0x00000000);
-	nv_icmd(dev, 0x00000428, 0x00000000);
-	nv_icmd(dev, 0x00000429, 0x00000000);
-	nv_icmd(dev, 0x0000042a, 0x00000000);
-	nv_icmd(dev, 0x0000042b, 0x00000000);
-	nv_icmd(dev, 0x0000042c, 0x00000000);
-	nv_icmd(dev, 0x0000042d, 0x00000000);
-	nv_icmd(dev, 0x0000042e, 0x00000000);
-	nv_icmd(dev, 0x0000042f, 0x00000000);
-	nv_icmd(dev, 0x000002b0, 0x00000000);
-	nv_icmd(dev, 0x000002b1, 0x00000000);
-	nv_icmd(dev, 0x000002b2, 0x00000000);
-	nv_icmd(dev, 0x000002b3, 0x00000000);
-	nv_icmd(dev, 0x000002b4, 0x00000000);
-	nv_icmd(dev, 0x000002b5, 0x00000000);
-	nv_icmd(dev, 0x000002b6, 0x00000000);
-	nv_icmd(dev, 0x000002b7, 0x00000000);
-	nv_icmd(dev, 0x000002b8, 0x00000000);
-	nv_icmd(dev, 0x000002b9, 0x00000000);
-	nv_icmd(dev, 0x000002ba, 0x00000000);
-	nv_icmd(dev, 0x000002bb, 0x00000000);
-	nv_icmd(dev, 0x000002bc, 0x00000000);
-	nv_icmd(dev, 0x000002bd, 0x00000000);
-	nv_icmd(dev, 0x000002be, 0x00000000);
-	nv_icmd(dev, 0x000002bf, 0x00000000);
-	nv_icmd(dev, 0x00000430, 0x00000000);
-	nv_icmd(dev, 0x00000431, 0x00000000);
-	nv_icmd(dev, 0x00000432, 0x00000000);
-	nv_icmd(dev, 0x00000433, 0x00000000);
-	nv_icmd(dev, 0x00000434, 0x00000000);
-	nv_icmd(dev, 0x00000435, 0x00000000);
-	nv_icmd(dev, 0x00000436, 0x00000000);
-	nv_icmd(dev, 0x00000437, 0x00000000);
-	nv_icmd(dev, 0x00000438, 0x00000000);
-	nv_icmd(dev, 0x00000439, 0x00000000);
-	nv_icmd(dev, 0x0000043a, 0x00000000);
-	nv_icmd(dev, 0x0000043b, 0x00000000);
-	nv_icmd(dev, 0x0000043c, 0x00000000);
-	nv_icmd(dev, 0x0000043d, 0x00000000);
-	nv_icmd(dev, 0x0000043e, 0x00000000);
-	nv_icmd(dev, 0x0000043f, 0x00000000);
-	nv_icmd(dev, 0x000002c0, 0x00000000);
-	nv_icmd(dev, 0x000002c1, 0x00000000);
-	nv_icmd(dev, 0x000002c2, 0x00000000);
-	nv_icmd(dev, 0x000002c3, 0x00000000);
-	nv_icmd(dev, 0x000002c4, 0x00000000);
-	nv_icmd(dev, 0x000002c5, 0x00000000);
-	nv_icmd(dev, 0x000002c6, 0x00000000);
-	nv_icmd(dev, 0x000002c7, 0x00000000);
-	nv_icmd(dev, 0x000002c8, 0x00000000);
-	nv_icmd(dev, 0x000002c9, 0x00000000);
-	nv_icmd(dev, 0x000002ca, 0x00000000);
-	nv_icmd(dev, 0x000002cb, 0x00000000);
-	nv_icmd(dev, 0x000002cc, 0x00000000);
-	nv_icmd(dev, 0x000002cd, 0x00000000);
-	nv_icmd(dev, 0x000002ce, 0x00000000);
-	nv_icmd(dev, 0x000002cf, 0x00000000);
-	nv_icmd(dev, 0x000004d0, 0x00000000);
-	nv_icmd(dev, 0x000004d1, 0x00000000);
-	nv_icmd(dev, 0x000004d2, 0x00000000);
-	nv_icmd(dev, 0x000004d3, 0x00000000);
-	nv_icmd(dev, 0x000004d4, 0x00000000);
-	nv_icmd(dev, 0x000004d5, 0x00000000);
-	nv_icmd(dev, 0x000004d6, 0x00000000);
-	nv_icmd(dev, 0x000004d7, 0x00000000);
-	nv_icmd(dev, 0x000004d8, 0x00000000);
-	nv_icmd(dev, 0x000004d9, 0x00000000);
-	nv_icmd(dev, 0x000004da, 0x00000000);
-	nv_icmd(dev, 0x000004db, 0x00000000);
-	nv_icmd(dev, 0x000004dc, 0x00000000);
-	nv_icmd(dev, 0x000004dd, 0x00000000);
-	nv_icmd(dev, 0x000004de, 0x00000000);
-	nv_icmd(dev, 0x000004df, 0x00000000);
-	nv_icmd(dev, 0x00000720, 0x00000000);
-	nv_icmd(dev, 0x00000721, 0x00000000);
-	nv_icmd(dev, 0x00000722, 0x00000000);
-	nv_icmd(dev, 0x00000723, 0x00000000);
-	nv_icmd(dev, 0x00000724, 0x00000000);
-	nv_icmd(dev, 0x00000725, 0x00000000);
-	nv_icmd(dev, 0x00000726, 0x00000000);
-	nv_icmd(dev, 0x00000727, 0x00000000);
-	nv_icmd(dev, 0x00000728, 0x00000000);
-	nv_icmd(dev, 0x00000729, 0x00000000);
-	nv_icmd(dev, 0x0000072a, 0x00000000);
-	nv_icmd(dev, 0x0000072b, 0x00000000);
-	nv_icmd(dev, 0x0000072c, 0x00000000);
-	nv_icmd(dev, 0x0000072d, 0x00000000);
-	nv_icmd(dev, 0x0000072e, 0x00000000);
-	nv_icmd(dev, 0x0000072f, 0x00000000);
-	nv_icmd(dev, 0x000008c0, 0x00000000);
-	nv_icmd(dev, 0x000008c1, 0x00000000);
-	nv_icmd(dev, 0x000008c2, 0x00000000);
-	nv_icmd(dev, 0x000008c3, 0x00000000);
-	nv_icmd(dev, 0x000008c4, 0x00000000);
-	nv_icmd(dev, 0x000008c5, 0x00000000);
-	nv_icmd(dev, 0x000008c6, 0x00000000);
-	nv_icmd(dev, 0x000008c7, 0x00000000);
-	nv_icmd(dev, 0x000008c8, 0x00000000);
-	nv_icmd(dev, 0x000008c9, 0x00000000);
-	nv_icmd(dev, 0x000008ca, 0x00000000);
-	nv_icmd(dev, 0x000008cb, 0x00000000);
-	nv_icmd(dev, 0x000008cc, 0x00000000);
-	nv_icmd(dev, 0x000008cd, 0x00000000);
-	nv_icmd(dev, 0x000008ce, 0x00000000);
-	nv_icmd(dev, 0x000008cf, 0x00000000);
-	nv_icmd(dev, 0x00000890, 0x00000000);
-	nv_icmd(dev, 0x00000891, 0x00000000);
-	nv_icmd(dev, 0x00000892, 0x00000000);
-	nv_icmd(dev, 0x00000893, 0x00000000);
-	nv_icmd(dev, 0x00000894, 0x00000000);
-	nv_icmd(dev, 0x00000895, 0x00000000);
-	nv_icmd(dev, 0x00000896, 0x00000000);
-	nv_icmd(dev, 0x00000897, 0x00000000);
-	nv_icmd(dev, 0x00000898, 0x00000000);
-	nv_icmd(dev, 0x00000899, 0x00000000);
-	nv_icmd(dev, 0x0000089a, 0x00000000);
-	nv_icmd(dev, 0x0000089b, 0x00000000);
-	nv_icmd(dev, 0x0000089c, 0x00000000);
-	nv_icmd(dev, 0x0000089d, 0x00000000);
-	nv_icmd(dev, 0x0000089e, 0x00000000);
-	nv_icmd(dev, 0x0000089f, 0x00000000);
-	nv_icmd(dev, 0x000008e0, 0x00000000);
-	nv_icmd(dev, 0x000008e1, 0x00000000);
-	nv_icmd(dev, 0x000008e2, 0x00000000);
-	nv_icmd(dev, 0x000008e3, 0x00000000);
-	nv_icmd(dev, 0x000008e4, 0x00000000);
-	nv_icmd(dev, 0x000008e5, 0x00000000);
-	nv_icmd(dev, 0x000008e6, 0x00000000);
-	nv_icmd(dev, 0x000008e7, 0x00000000);
-	nv_icmd(dev, 0x000008e8, 0x00000000);
-	nv_icmd(dev, 0x000008e9, 0x00000000);
-	nv_icmd(dev, 0x000008ea, 0x00000000);
-	nv_icmd(dev, 0x000008eb, 0x00000000);
-	nv_icmd(dev, 0x000008ec, 0x00000000);
-	nv_icmd(dev, 0x000008ed, 0x00000000);
-	nv_icmd(dev, 0x000008ee, 0x00000000);
-	nv_icmd(dev, 0x000008ef, 0x00000000);
-	nv_icmd(dev, 0x000008a0, 0x00000000);
-	nv_icmd(dev, 0x000008a1, 0x00000000);
-	nv_icmd(dev, 0x000008a2, 0x00000000);
-	nv_icmd(dev, 0x000008a3, 0x00000000);
-	nv_icmd(dev, 0x000008a4, 0x00000000);
-	nv_icmd(dev, 0x000008a5, 0x00000000);
-	nv_icmd(dev, 0x000008a6, 0x00000000);
-	nv_icmd(dev, 0x000008a7, 0x00000000);
-	nv_icmd(dev, 0x000008a8, 0x00000000);
-	nv_icmd(dev, 0x000008a9, 0x00000000);
-	nv_icmd(dev, 0x000008aa, 0x00000000);
-	nv_icmd(dev, 0x000008ab, 0x00000000);
-	nv_icmd(dev, 0x000008ac, 0x00000000);
-	nv_icmd(dev, 0x000008ad, 0x00000000);
-	nv_icmd(dev, 0x000008ae, 0x00000000);
-	nv_icmd(dev, 0x000008af, 0x00000000);
-	nv_icmd(dev, 0x000008f0, 0x00000000);
-	nv_icmd(dev, 0x000008f1, 0x00000000);
-	nv_icmd(dev, 0x000008f2, 0x00000000);
-	nv_icmd(dev, 0x000008f3, 0x00000000);
-	nv_icmd(dev, 0x000008f4, 0x00000000);
-	nv_icmd(dev, 0x000008f5, 0x00000000);
-	nv_icmd(dev, 0x000008f6, 0x00000000);
-	nv_icmd(dev, 0x000008f7, 0x00000000);
-	nv_icmd(dev, 0x000008f8, 0x00000000);
-	nv_icmd(dev, 0x000008f9, 0x00000000);
-	nv_icmd(dev, 0x000008fa, 0x00000000);
-	nv_icmd(dev, 0x000008fb, 0x00000000);
-	nv_icmd(dev, 0x000008fc, 0x00000000);
-	nv_icmd(dev, 0x000008fd, 0x00000000);
-	nv_icmd(dev, 0x000008fe, 0x00000000);
-	nv_icmd(dev, 0x000008ff, 0x00000000);
-	nv_icmd(dev, 0x0000094c, 0x000000ff);
-	nv_icmd(dev, 0x0000094d, 0xffffffff);
-	nv_icmd(dev, 0x0000094e, 0x00000002);
-	nv_icmd(dev, 0x000002ec, 0x00000001);
-	nv_icmd(dev, 0x00000303, 0x00000001);
-	nv_icmd(dev, 0x000002e6, 0x00000001);
-	nv_icmd(dev, 0x00000466, 0x00000052);
-	nv_icmd(dev, 0x00000301, 0x3f800000);
-	nv_icmd(dev, 0x00000304, 0x30201000);
-	nv_icmd(dev, 0x00000305, 0x70605040);
-	nv_icmd(dev, 0x00000306, 0xb8a89888);
-	nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
-	nv_icmd(dev, 0x0000030a, 0x00ffff00);
-	nv_icmd(dev, 0x0000030b, 0x0000001a);
-	nv_icmd(dev, 0x0000030c, 0x00000001);
-	nv_icmd(dev, 0x00000318, 0x00000001);
-	nv_icmd(dev, 0x00000340, 0x00000000);
-	nv_icmd(dev, 0x00000375, 0x00000001);
-	nv_icmd(dev, 0x00000351, 0x00000100);
-	nv_icmd(dev, 0x0000037d, 0x00000006);
-	nv_icmd(dev, 0x000003a0, 0x00000002);
-	nv_icmd(dev, 0x000003aa, 0x00000001);
-	nv_icmd(dev, 0x000003a9, 0x00000001);
-	nv_icmd(dev, 0x00000380, 0x00000001);
-	nv_icmd(dev, 0x00000360, 0x00000040);
-	nv_icmd(dev, 0x00000366, 0x00000000);
-	nv_icmd(dev, 0x00000367, 0x00000000);
-	nv_icmd(dev, 0x00000368, 0x00001fff);
-	nv_icmd(dev, 0x00000370, 0x00000000);
-	nv_icmd(dev, 0x00000371, 0x00000000);
-	nv_icmd(dev, 0x00000372, 0x003fffff);
-	nv_icmd(dev, 0x0000037a, 0x00000012);
-	nv_icmd(dev, 0x000005e0, 0x00000022);
-	nv_icmd(dev, 0x000005e1, 0x00000022);
-	nv_icmd(dev, 0x000005e2, 0x00000022);
-	nv_icmd(dev, 0x000005e3, 0x00000022);
-	nv_icmd(dev, 0x000005e4, 0x00000022);
-	nv_icmd(dev, 0x00000619, 0x00000003);
-	nv_icmd(dev, 0x00000811, 0x00000003);
-	nv_icmd(dev, 0x00000812, 0x00000004);
-	nv_icmd(dev, 0x00000813, 0x00000006);
-	nv_icmd(dev, 0x00000814, 0x00000008);
-	nv_icmd(dev, 0x00000815, 0x0000000b);
-	nv_icmd(dev, 0x00000800, 0x00000001);
-	nv_icmd(dev, 0x00000801, 0x00000001);
-	nv_icmd(dev, 0x00000802, 0x00000001);
-	nv_icmd(dev, 0x00000803, 0x00000001);
-	nv_icmd(dev, 0x00000804, 0x00000001);
-	nv_icmd(dev, 0x00000805, 0x00000001);
-	nv_icmd(dev, 0x00000632, 0x00000001);
-	nv_icmd(dev, 0x00000633, 0x00000002);
-	nv_icmd(dev, 0x00000634, 0x00000003);
-	nv_icmd(dev, 0x00000635, 0x00000004);
-	nv_icmd(dev, 0x00000654, 0x3f800000);
-	nv_icmd(dev, 0x00000657, 0x3f800000);
-	nv_icmd(dev, 0x00000655, 0x3f800000);
-	nv_icmd(dev, 0x00000656, 0x3f800000);
-	nv_icmd(dev, 0x000006cd, 0x3f800000);
-	nv_icmd(dev, 0x000007f5, 0x3f800000);
-	nv_icmd(dev, 0x000007dc, 0x39291909);
-	nv_icmd(dev, 0x000007dd, 0x79695949);
-	nv_icmd(dev, 0x000007de, 0xb9a99989);
-	nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
-	nv_icmd(dev, 0x000007e8, 0x00003210);
-	nv_icmd(dev, 0x000007e9, 0x00007654);
-	nv_icmd(dev, 0x000007ea, 0x00000098);
-	nv_icmd(dev, 0x000007ec, 0x39291909);
-	nv_icmd(dev, 0x000007ed, 0x79695949);
-	nv_icmd(dev, 0x000007ee, 0xb9a99989);
-	nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
-	nv_icmd(dev, 0x000007f0, 0x00003210);
-	nv_icmd(dev, 0x000007f1, 0x00007654);
-	nv_icmd(dev, 0x000007f2, 0x00000098);
-	nv_icmd(dev, 0x000005a5, 0x00000001);
-	nv_icmd(dev, 0x00000980, 0x00000000);
-	nv_icmd(dev, 0x00000981, 0x00000000);
-	nv_icmd(dev, 0x00000982, 0x00000000);
-	nv_icmd(dev, 0x00000983, 0x00000000);
-	nv_icmd(dev, 0x00000984, 0x00000000);
-	nv_icmd(dev, 0x00000985, 0x00000000);
-	nv_icmd(dev, 0x00000986, 0x00000000);
-	nv_icmd(dev, 0x00000987, 0x00000000);
-	nv_icmd(dev, 0x00000988, 0x00000000);
-	nv_icmd(dev, 0x00000989, 0x00000000);
-	nv_icmd(dev, 0x0000098a, 0x00000000);
-	nv_icmd(dev, 0x0000098b, 0x00000000);
-	nv_icmd(dev, 0x0000098c, 0x00000000);
-	nv_icmd(dev, 0x0000098d, 0x00000000);
-	nv_icmd(dev, 0x0000098e, 0x00000000);
-	nv_icmd(dev, 0x0000098f, 0x00000000);
-	nv_icmd(dev, 0x00000990, 0x00000000);
-	nv_icmd(dev, 0x00000991, 0x00000000);
-	nv_icmd(dev, 0x00000992, 0x00000000);
-	nv_icmd(dev, 0x00000993, 0x00000000);
-	nv_icmd(dev, 0x00000994, 0x00000000);
-	nv_icmd(dev, 0x00000995, 0x00000000);
-	nv_icmd(dev, 0x00000996, 0x00000000);
-	nv_icmd(dev, 0x00000997, 0x00000000);
-	nv_icmd(dev, 0x00000998, 0x00000000);
-	nv_icmd(dev, 0x00000999, 0x00000000);
-	nv_icmd(dev, 0x0000099a, 0x00000000);
-	nv_icmd(dev, 0x0000099b, 0x00000000);
-	nv_icmd(dev, 0x0000099c, 0x00000000);
-	nv_icmd(dev, 0x0000099d, 0x00000000);
-	nv_icmd(dev, 0x0000099e, 0x00000000);
-	nv_icmd(dev, 0x0000099f, 0x00000000);
-	nv_icmd(dev, 0x000009a0, 0x00000000);
-	nv_icmd(dev, 0x000009a1, 0x00000000);
-	nv_icmd(dev, 0x000009a2, 0x00000000);
-	nv_icmd(dev, 0x000009a3, 0x00000000);
-	nv_icmd(dev, 0x000009a4, 0x00000000);
-	nv_icmd(dev, 0x000009a5, 0x00000000);
-	nv_icmd(dev, 0x000009a6, 0x00000000);
-	nv_icmd(dev, 0x000009a7, 0x00000000);
-	nv_icmd(dev, 0x000009a8, 0x00000000);
-	nv_icmd(dev, 0x000009a9, 0x00000000);
-	nv_icmd(dev, 0x000009aa, 0x00000000);
-	nv_icmd(dev, 0x000009ab, 0x00000000);
-	nv_icmd(dev, 0x000009ac, 0x00000000);
-	nv_icmd(dev, 0x000009ad, 0x00000000);
-	nv_icmd(dev, 0x000009ae, 0x00000000);
-	nv_icmd(dev, 0x000009af, 0x00000000);
-	nv_icmd(dev, 0x000009b0, 0x00000000);
-	nv_icmd(dev, 0x000009b1, 0x00000000);
-	nv_icmd(dev, 0x000009b2, 0x00000000);
-	nv_icmd(dev, 0x000009b3, 0x00000000);
-	nv_icmd(dev, 0x000009b4, 0x00000000);
-	nv_icmd(dev, 0x000009b5, 0x00000000);
-	nv_icmd(dev, 0x000009b6, 0x00000000);
-	nv_icmd(dev, 0x000009b7, 0x00000000);
-	nv_icmd(dev, 0x000009b8, 0x00000000);
-	nv_icmd(dev, 0x000009b9, 0x00000000);
-	nv_icmd(dev, 0x000009ba, 0x00000000);
-	nv_icmd(dev, 0x000009bb, 0x00000000);
-	nv_icmd(dev, 0x000009bc, 0x00000000);
-	nv_icmd(dev, 0x000009bd, 0x00000000);
-	nv_icmd(dev, 0x000009be, 0x00000000);
-	nv_icmd(dev, 0x000009bf, 0x00000000);
-	nv_icmd(dev, 0x000009c0, 0x00000000);
-	nv_icmd(dev, 0x000009c1, 0x00000000);
-	nv_icmd(dev, 0x000009c2, 0x00000000);
-	nv_icmd(dev, 0x000009c3, 0x00000000);
-	nv_icmd(dev, 0x000009c4, 0x00000000);
-	nv_icmd(dev, 0x000009c5, 0x00000000);
-	nv_icmd(dev, 0x000009c6, 0x00000000);
-	nv_icmd(dev, 0x000009c7, 0x00000000);
-	nv_icmd(dev, 0x000009c8, 0x00000000);
-	nv_icmd(dev, 0x000009c9, 0x00000000);
-	nv_icmd(dev, 0x000009ca, 0x00000000);
-	nv_icmd(dev, 0x000009cb, 0x00000000);
-	nv_icmd(dev, 0x000009cc, 0x00000000);
-	nv_icmd(dev, 0x000009cd, 0x00000000);
-	nv_icmd(dev, 0x000009ce, 0x00000000);
-	nv_icmd(dev, 0x000009cf, 0x00000000);
-	nv_icmd(dev, 0x000009d0, 0x00000000);
-	nv_icmd(dev, 0x000009d1, 0x00000000);
-	nv_icmd(dev, 0x000009d2, 0x00000000);
-	nv_icmd(dev, 0x000009d3, 0x00000000);
-	nv_icmd(dev, 0x000009d4, 0x00000000);
-	nv_icmd(dev, 0x000009d5, 0x00000000);
-	nv_icmd(dev, 0x000009d6, 0x00000000);
-	nv_icmd(dev, 0x000009d7, 0x00000000);
-	nv_icmd(dev, 0x000009d8, 0x00000000);
-	nv_icmd(dev, 0x000009d9, 0x00000000);
-	nv_icmd(dev, 0x000009da, 0x00000000);
-	nv_icmd(dev, 0x000009db, 0x00000000);
-	nv_icmd(dev, 0x000009dc, 0x00000000);
-	nv_icmd(dev, 0x000009dd, 0x00000000);
-	nv_icmd(dev, 0x000009de, 0x00000000);
-	nv_icmd(dev, 0x000009df, 0x00000000);
-	nv_icmd(dev, 0x000009e0, 0x00000000);
-	nv_icmd(dev, 0x000009e1, 0x00000000);
-	nv_icmd(dev, 0x000009e2, 0x00000000);
-	nv_icmd(dev, 0x000009e3, 0x00000000);
-	nv_icmd(dev, 0x000009e4, 0x00000000);
-	nv_icmd(dev, 0x000009e5, 0x00000000);
-	nv_icmd(dev, 0x000009e6, 0x00000000);
-	nv_icmd(dev, 0x000009e7, 0x00000000);
-	nv_icmd(dev, 0x000009e8, 0x00000000);
-	nv_icmd(dev, 0x000009e9, 0x00000000);
-	nv_icmd(dev, 0x000009ea, 0x00000000);
-	nv_icmd(dev, 0x000009eb, 0x00000000);
-	nv_icmd(dev, 0x000009ec, 0x00000000);
-	nv_icmd(dev, 0x000009ed, 0x00000000);
-	nv_icmd(dev, 0x000009ee, 0x00000000);
-	nv_icmd(dev, 0x000009ef, 0x00000000);
-	nv_icmd(dev, 0x000009f0, 0x00000000);
-	nv_icmd(dev, 0x000009f1, 0x00000000);
-	nv_icmd(dev, 0x000009f2, 0x00000000);
-	nv_icmd(dev, 0x000009f3, 0x00000000);
-	nv_icmd(dev, 0x000009f4, 0x00000000);
-	nv_icmd(dev, 0x000009f5, 0x00000000);
-	nv_icmd(dev, 0x000009f6, 0x00000000);
-	nv_icmd(dev, 0x000009f7, 0x00000000);
-	nv_icmd(dev, 0x000009f8, 0x00000000);
-	nv_icmd(dev, 0x000009f9, 0x00000000);
-	nv_icmd(dev, 0x000009fa, 0x00000000);
-	nv_icmd(dev, 0x000009fb, 0x00000000);
-	nv_icmd(dev, 0x000009fc, 0x00000000);
-	nv_icmd(dev, 0x000009fd, 0x00000000);
-	nv_icmd(dev, 0x000009fe, 0x00000000);
-	nv_icmd(dev, 0x000009ff, 0x00000000);
-	nv_icmd(dev, 0x00000468, 0x00000004);
-	nv_icmd(dev, 0x0000046c, 0x00000001);
-	nv_icmd(dev, 0x00000470, 0x00000000);
-	nv_icmd(dev, 0x00000471, 0x00000000);
-	nv_icmd(dev, 0x00000472, 0x00000000);
-	nv_icmd(dev, 0x00000473, 0x00000000);
-	nv_icmd(dev, 0x00000474, 0x00000000);
-	nv_icmd(dev, 0x00000475, 0x00000000);
-	nv_icmd(dev, 0x00000476, 0x00000000);
-	nv_icmd(dev, 0x00000477, 0x00000000);
-	nv_icmd(dev, 0x00000478, 0x00000000);
-	nv_icmd(dev, 0x00000479, 0x00000000);
-	nv_icmd(dev, 0x0000047a, 0x00000000);
-	nv_icmd(dev, 0x0000047b, 0x00000000);
-	nv_icmd(dev, 0x0000047c, 0x00000000);
-	nv_icmd(dev, 0x0000047d, 0x00000000);
-	nv_icmd(dev, 0x0000047e, 0x00000000);
-	nv_icmd(dev, 0x0000047f, 0x00000000);
-	nv_icmd(dev, 0x00000480, 0x00000000);
-	nv_icmd(dev, 0x00000481, 0x00000000);
-	nv_icmd(dev, 0x00000482, 0x00000000);
-	nv_icmd(dev, 0x00000483, 0x00000000);
-	nv_icmd(dev, 0x00000484, 0x00000000);
-	nv_icmd(dev, 0x00000485, 0x00000000);
-	nv_icmd(dev, 0x00000486, 0x00000000);
-	nv_icmd(dev, 0x00000487, 0x00000000);
-	nv_icmd(dev, 0x00000488, 0x00000000);
-	nv_icmd(dev, 0x00000489, 0x00000000);
-	nv_icmd(dev, 0x0000048a, 0x00000000);
-	nv_icmd(dev, 0x0000048b, 0x00000000);
-	nv_icmd(dev, 0x0000048c, 0x00000000);
-	nv_icmd(dev, 0x0000048d, 0x00000000);
-	nv_icmd(dev, 0x0000048e, 0x00000000);
-	nv_icmd(dev, 0x0000048f, 0x00000000);
-	nv_icmd(dev, 0x00000490, 0x00000000);
-	nv_icmd(dev, 0x00000491, 0x00000000);
-	nv_icmd(dev, 0x00000492, 0x00000000);
-	nv_icmd(dev, 0x00000493, 0x00000000);
-	nv_icmd(dev, 0x00000494, 0x00000000);
-	nv_icmd(dev, 0x00000495, 0x00000000);
-	nv_icmd(dev, 0x00000496, 0x00000000);
-	nv_icmd(dev, 0x00000497, 0x00000000);
-	nv_icmd(dev, 0x00000498, 0x00000000);
-	nv_icmd(dev, 0x00000499, 0x00000000);
-	nv_icmd(dev, 0x0000049a, 0x00000000);
-	nv_icmd(dev, 0x0000049b, 0x00000000);
-	nv_icmd(dev, 0x0000049c, 0x00000000);
-	nv_icmd(dev, 0x0000049d, 0x00000000);
-	nv_icmd(dev, 0x0000049e, 0x00000000);
-	nv_icmd(dev, 0x0000049f, 0x00000000);
-	nv_icmd(dev, 0x000004a0, 0x00000000);
-	nv_icmd(dev, 0x000004a1, 0x00000000);
-	nv_icmd(dev, 0x000004a2, 0x00000000);
-	nv_icmd(dev, 0x000004a3, 0x00000000);
-	nv_icmd(dev, 0x000004a4, 0x00000000);
-	nv_icmd(dev, 0x000004a5, 0x00000000);
-	nv_icmd(dev, 0x000004a6, 0x00000000);
-	nv_icmd(dev, 0x000004a7, 0x00000000);
-	nv_icmd(dev, 0x000004a8, 0x00000000);
-	nv_icmd(dev, 0x000004a9, 0x00000000);
-	nv_icmd(dev, 0x000004aa, 0x00000000);
-	nv_icmd(dev, 0x000004ab, 0x00000000);
-	nv_icmd(dev, 0x000004ac, 0x00000000);
-	nv_icmd(dev, 0x000004ad, 0x00000000);
-	nv_icmd(dev, 0x000004ae, 0x00000000);
-	nv_icmd(dev, 0x000004af, 0x00000000);
-	nv_icmd(dev, 0x000004b0, 0x00000000);
-	nv_icmd(dev, 0x000004b1, 0x00000000);
-	nv_icmd(dev, 0x000004b2, 0x00000000);
-	nv_icmd(dev, 0x000004b3, 0x00000000);
-	nv_icmd(dev, 0x000004b4, 0x00000000);
-	nv_icmd(dev, 0x000004b5, 0x00000000);
-	nv_icmd(dev, 0x000004b6, 0x00000000);
-	nv_icmd(dev, 0x000004b7, 0x00000000);
-	nv_icmd(dev, 0x000004b8, 0x00000000);
-	nv_icmd(dev, 0x000004b9, 0x00000000);
-	nv_icmd(dev, 0x000004ba, 0x00000000);
-	nv_icmd(dev, 0x000004bb, 0x00000000);
-	nv_icmd(dev, 0x000004bc, 0x00000000);
-	nv_icmd(dev, 0x000004bd, 0x00000000);
-	nv_icmd(dev, 0x000004be, 0x00000000);
-	nv_icmd(dev, 0x000004bf, 0x00000000);
-	nv_icmd(dev, 0x000004c0, 0x00000000);
-	nv_icmd(dev, 0x000004c1, 0x00000000);
-	nv_icmd(dev, 0x000004c2, 0x00000000);
-	nv_icmd(dev, 0x000004c3, 0x00000000);
-	nv_icmd(dev, 0x000004c4, 0x00000000);
-	nv_icmd(dev, 0x000004c5, 0x00000000);
-	nv_icmd(dev, 0x000004c6, 0x00000000);
-	nv_icmd(dev, 0x000004c7, 0x00000000);
-	nv_icmd(dev, 0x000004c8, 0x00000000);
-	nv_icmd(dev, 0x000004c9, 0x00000000);
-	nv_icmd(dev, 0x000004ca, 0x00000000);
-	nv_icmd(dev, 0x000004cb, 0x00000000);
-	nv_icmd(dev, 0x000004cc, 0x00000000);
-	nv_icmd(dev, 0x000004cd, 0x00000000);
-	nv_icmd(dev, 0x000004ce, 0x00000000);
-	nv_icmd(dev, 0x000004cf, 0x00000000);
-	nv_icmd(dev, 0x00000510, 0x3f800000);
-	nv_icmd(dev, 0x00000511, 0x3f800000);
-	nv_icmd(dev, 0x00000512, 0x3f800000);
-	nv_icmd(dev, 0x00000513, 0x3f800000);
-	nv_icmd(dev, 0x00000514, 0x3f800000);
-	nv_icmd(dev, 0x00000515, 0x3f800000);
-	nv_icmd(dev, 0x00000516, 0x3f800000);
-	nv_icmd(dev, 0x00000517, 0x3f800000);
-	nv_icmd(dev, 0x00000518, 0x3f800000);
-	nv_icmd(dev, 0x00000519, 0x3f800000);
-	nv_icmd(dev, 0x0000051a, 0x3f800000);
-	nv_icmd(dev, 0x0000051b, 0x3f800000);
-	nv_icmd(dev, 0x0000051c, 0x3f800000);
-	nv_icmd(dev, 0x0000051d, 0x3f800000);
-	nv_icmd(dev, 0x0000051e, 0x3f800000);
-	nv_icmd(dev, 0x0000051f, 0x3f800000);
-	nv_icmd(dev, 0x00000520, 0x000002b6);
-	nv_icmd(dev, 0x00000529, 0x00000001);
-	nv_icmd(dev, 0x00000530, 0xffff0000);
-	nv_icmd(dev, 0x00000531, 0xffff0000);
-	nv_icmd(dev, 0x00000532, 0xffff0000);
-	nv_icmd(dev, 0x00000533, 0xffff0000);
-	nv_icmd(dev, 0x00000534, 0xffff0000);
-	nv_icmd(dev, 0x00000535, 0xffff0000);
-	nv_icmd(dev, 0x00000536, 0xffff0000);
-	nv_icmd(dev, 0x00000537, 0xffff0000);
-	nv_icmd(dev, 0x00000538, 0xffff0000);
-	nv_icmd(dev, 0x00000539, 0xffff0000);
-	nv_icmd(dev, 0x0000053a, 0xffff0000);
-	nv_icmd(dev, 0x0000053b, 0xffff0000);
-	nv_icmd(dev, 0x0000053c, 0xffff0000);
-	nv_icmd(dev, 0x0000053d, 0xffff0000);
-	nv_icmd(dev, 0x0000053e, 0xffff0000);
-	nv_icmd(dev, 0x0000053f, 0xffff0000);
-	nv_icmd(dev, 0x00000585, 0x0000003f);
-	nv_icmd(dev, 0x00000576, 0x00000003);
-	if (dev_priv->chipset == 0xc1 ||
-	    dev_priv->chipset == 0xd9)
-		nv_icmd(dev, 0x0000057b, 0x00000059);
-	nv_icmd(dev, 0x00000586, 0x00000040);
-	nv_icmd(dev, 0x00000582, 0x00000080);
-	nv_icmd(dev, 0x00000583, 0x00000080);
-	nv_icmd(dev, 0x000005c2, 0x00000001);
-	nv_icmd(dev, 0x00000638, 0x00000001);
-	nv_icmd(dev, 0x00000639, 0x00000001);
-	nv_icmd(dev, 0x0000063a, 0x00000002);
-	nv_icmd(dev, 0x0000063b, 0x00000001);
-	nv_icmd(dev, 0x0000063c, 0x00000001);
-	nv_icmd(dev, 0x0000063d, 0x00000002);
-	nv_icmd(dev, 0x0000063e, 0x00000001);
-	nv_icmd(dev, 0x000008b8, 0x00000001);
-	nv_icmd(dev, 0x000008b9, 0x00000001);
-	nv_icmd(dev, 0x000008ba, 0x00000001);
-	nv_icmd(dev, 0x000008bb, 0x00000001);
-	nv_icmd(dev, 0x000008bc, 0x00000001);
-	nv_icmd(dev, 0x000008bd, 0x00000001);
-	nv_icmd(dev, 0x000008be, 0x00000001);
-	nv_icmd(dev, 0x000008bf, 0x00000001);
-	nv_icmd(dev, 0x00000900, 0x00000001);
-	nv_icmd(dev, 0x00000901, 0x00000001);
-	nv_icmd(dev, 0x00000902, 0x00000001);
-	nv_icmd(dev, 0x00000903, 0x00000001);
-	nv_icmd(dev, 0x00000904, 0x00000001);
-	nv_icmd(dev, 0x00000905, 0x00000001);
-	nv_icmd(dev, 0x00000906, 0x00000001);
-	nv_icmd(dev, 0x00000907, 0x00000001);
-	nv_icmd(dev, 0x00000908, 0x00000002);
-	nv_icmd(dev, 0x00000909, 0x00000002);
-	nv_icmd(dev, 0x0000090a, 0x00000002);
-	nv_icmd(dev, 0x0000090b, 0x00000002);
-	nv_icmd(dev, 0x0000090c, 0x00000002);
-	nv_icmd(dev, 0x0000090d, 0x00000002);
-	nv_icmd(dev, 0x0000090e, 0x00000002);
-	nv_icmd(dev, 0x0000090f, 0x00000002);
-	nv_icmd(dev, 0x00000910, 0x00000001);
-	nv_icmd(dev, 0x00000911, 0x00000001);
-	nv_icmd(dev, 0x00000912, 0x00000001);
-	nv_icmd(dev, 0x00000913, 0x00000001);
-	nv_icmd(dev, 0x00000914, 0x00000001);
-	nv_icmd(dev, 0x00000915, 0x00000001);
-	nv_icmd(dev, 0x00000916, 0x00000001);
-	nv_icmd(dev, 0x00000917, 0x00000001);
-	nv_icmd(dev, 0x00000918, 0x00000001);
-	nv_icmd(dev, 0x00000919, 0x00000001);
-	nv_icmd(dev, 0x0000091a, 0x00000001);
-	nv_icmd(dev, 0x0000091b, 0x00000001);
-	nv_icmd(dev, 0x0000091c, 0x00000001);
-	nv_icmd(dev, 0x0000091d, 0x00000001);
-	nv_icmd(dev, 0x0000091e, 0x00000001);
-	nv_icmd(dev, 0x0000091f, 0x00000001);
-	nv_icmd(dev, 0x00000920, 0x00000002);
-	nv_icmd(dev, 0x00000921, 0x00000002);
-	nv_icmd(dev, 0x00000922, 0x00000002);
-	nv_icmd(dev, 0x00000923, 0x00000002);
-	nv_icmd(dev, 0x00000924, 0x00000002);
-	nv_icmd(dev, 0x00000925, 0x00000002);
-	nv_icmd(dev, 0x00000926, 0x00000002);
-	nv_icmd(dev, 0x00000927, 0x00000002);
-	nv_icmd(dev, 0x00000928, 0x00000001);
-	nv_icmd(dev, 0x00000929, 0x00000001);
-	nv_icmd(dev, 0x0000092a, 0x00000001);
-	nv_icmd(dev, 0x0000092b, 0x00000001);
-	nv_icmd(dev, 0x0000092c, 0x00000001);
-	nv_icmd(dev, 0x0000092d, 0x00000001);
-	nv_icmd(dev, 0x0000092e, 0x00000001);
-	nv_icmd(dev, 0x0000092f, 0x00000001);
-	nv_icmd(dev, 0x00000648, 0x00000001);
-	nv_icmd(dev, 0x00000649, 0x00000001);
-	nv_icmd(dev, 0x0000064a, 0x00000001);
-	nv_icmd(dev, 0x0000064b, 0x00000001);
-	nv_icmd(dev, 0x0000064c, 0x00000001);
-	nv_icmd(dev, 0x0000064d, 0x00000001);
-	nv_icmd(dev, 0x0000064e, 0x00000001);
-	nv_icmd(dev, 0x0000064f, 0x00000001);
-	nv_icmd(dev, 0x00000650, 0x00000001);
-	nv_icmd(dev, 0x00000658, 0x0000000f);
-	nv_icmd(dev, 0x000007ff, 0x0000000a);
-	nv_icmd(dev, 0x0000066a, 0x40000000);
-	nv_icmd(dev, 0x0000066b, 0x10000000);
-	nv_icmd(dev, 0x0000066c, 0xffff0000);
-	nv_icmd(dev, 0x0000066d, 0xffff0000);
-	nv_icmd(dev, 0x000007af, 0x00000008);
-	nv_icmd(dev, 0x000007b0, 0x00000008);
-	nv_icmd(dev, 0x000007f6, 0x00000001);
-	nv_icmd(dev, 0x000006b2, 0x00000055);
-	nv_icmd(dev, 0x000007ad, 0x00000003);
-	nv_icmd(dev, 0x00000937, 0x00000001);
-	nv_icmd(dev, 0x00000971, 0x00000008);
-	nv_icmd(dev, 0x00000972, 0x00000040);
-	nv_icmd(dev, 0x00000973, 0x0000012c);
-	nv_icmd(dev, 0x0000097c, 0x00000040);
-	nv_icmd(dev, 0x00000979, 0x00000003);
-	nv_icmd(dev, 0x00000975, 0x00000020);
-	nv_icmd(dev, 0x00000976, 0x00000001);
-	nv_icmd(dev, 0x00000977, 0x00000020);
-	nv_icmd(dev, 0x00000978, 0x00000001);
-	nv_icmd(dev, 0x00000957, 0x00000003);
-	nv_icmd(dev, 0x0000095e, 0x20164010);
-	nv_icmd(dev, 0x0000095f, 0x00000020);
-	if (dev_priv->chipset == 0xd9)
-		nv_icmd(dev, 0x0000097d, 0x00000020);
-	nv_icmd(dev, 0x00000683, 0x00000006);
-	nv_icmd(dev, 0x00000685, 0x003fffff);
-	nv_icmd(dev, 0x00000687, 0x00000c48);
-	nv_icmd(dev, 0x000006a0, 0x00000005);
-	nv_icmd(dev, 0x00000840, 0x00300008);
-	nv_icmd(dev, 0x00000841, 0x04000080);
-	nv_icmd(dev, 0x00000842, 0x00300008);
-	nv_icmd(dev, 0x00000843, 0x04000080);
-	nv_icmd(dev, 0x00000818, 0x00000000);
-	nv_icmd(dev, 0x00000819, 0x00000000);
-	nv_icmd(dev, 0x0000081a, 0x00000000);
-	nv_icmd(dev, 0x0000081b, 0x00000000);
-	nv_icmd(dev, 0x0000081c, 0x00000000);
-	nv_icmd(dev, 0x0000081d, 0x00000000);
-	nv_icmd(dev, 0x0000081e, 0x00000000);
-	nv_icmd(dev, 0x0000081f, 0x00000000);
-	nv_icmd(dev, 0x00000848, 0x00000000);
-	nv_icmd(dev, 0x00000849, 0x00000000);
-	nv_icmd(dev, 0x0000084a, 0x00000000);
-	nv_icmd(dev, 0x0000084b, 0x00000000);
-	nv_icmd(dev, 0x0000084c, 0x00000000);
-	nv_icmd(dev, 0x0000084d, 0x00000000);
-	nv_icmd(dev, 0x0000084e, 0x00000000);
-	nv_icmd(dev, 0x0000084f, 0x00000000);
-	nv_icmd(dev, 0x00000850, 0x00000000);
-	nv_icmd(dev, 0x00000851, 0x00000000);
-	nv_icmd(dev, 0x00000852, 0x00000000);
-	nv_icmd(dev, 0x00000853, 0x00000000);
-	nv_icmd(dev, 0x00000854, 0x00000000);
-	nv_icmd(dev, 0x00000855, 0x00000000);
-	nv_icmd(dev, 0x00000856, 0x00000000);
-	nv_icmd(dev, 0x00000857, 0x00000000);
-	nv_icmd(dev, 0x00000738, 0x00000000);
-	nv_icmd(dev, 0x000006aa, 0x00000001);
-	nv_icmd(dev, 0x000006ab, 0x00000002);
-	nv_icmd(dev, 0x000006ac, 0x00000080);
-	nv_icmd(dev, 0x000006ad, 0x00000100);
-	nv_icmd(dev, 0x000006ae, 0x00000100);
-	nv_icmd(dev, 0x000006b1, 0x00000011);
-	nv_icmd(dev, 0x000006bb, 0x000000cf);
-	nv_icmd(dev, 0x000006ce, 0x2a712488);
-	nv_icmd(dev, 0x00000739, 0x4085c000);
-	nv_icmd(dev, 0x0000073a, 0x00000080);
-	nv_icmd(dev, 0x00000786, 0x80000100);
-	nv_icmd(dev, 0x0000073c, 0x00010100);
-	nv_icmd(dev, 0x0000073d, 0x02800000);
-	nv_icmd(dev, 0x00000787, 0x000000cf);
-	nv_icmd(dev, 0x0000078c, 0x00000008);
-	nv_icmd(dev, 0x00000792, 0x00000001);
-	nv_icmd(dev, 0x00000794, 0x00000001);
-	nv_icmd(dev, 0x00000795, 0x00000001);
-	nv_icmd(dev, 0x00000796, 0x00000001);
-	nv_icmd(dev, 0x00000797, 0x000000cf);
-	nv_icmd(dev, 0x00000836, 0x00000001);
-	nv_icmd(dev, 0x0000079a, 0x00000002);
-	nv_icmd(dev, 0x00000833, 0x04444480);
-	nv_icmd(dev, 0x000007a1, 0x00000001);
-	nv_icmd(dev, 0x000007a3, 0x00000001);
-	nv_icmd(dev, 0x000007a4, 0x00000001);
-	nv_icmd(dev, 0x000007a5, 0x00000001);
-	nv_icmd(dev, 0x00000831, 0x00000004);
-	nv_icmd(dev, 0x0000080c, 0x00000002);
-	nv_icmd(dev, 0x0000080d, 0x00000100);
-	nv_icmd(dev, 0x0000080e, 0x00000100);
-	nv_icmd(dev, 0x0000080f, 0x00000001);
-	nv_icmd(dev, 0x00000823, 0x00000002);
-	nv_icmd(dev, 0x00000824, 0x00000100);
-	nv_icmd(dev, 0x00000825, 0x00000100);
-	nv_icmd(dev, 0x00000826, 0x00000001);
-	nv_icmd(dev, 0x0000095d, 0x00000001);
-	nv_icmd(dev, 0x0000082b, 0x00000004);
-	nv_icmd(dev, 0x00000942, 0x00010001);
-	nv_icmd(dev, 0x00000943, 0x00000001);
-	nv_icmd(dev, 0x00000944, 0x00000022);
-	nv_icmd(dev, 0x000007c5, 0x00010001);
-	nv_icmd(dev, 0x00000834, 0x00000001);
-	nv_icmd(dev, 0x000007c7, 0x00000001);
-	nv_icmd(dev, 0x0000c1b0, 0x0000000f);
-	nv_icmd(dev, 0x0000c1b1, 0x0000000f);
-	nv_icmd(dev, 0x0000c1b2, 0x0000000f);
-	nv_icmd(dev, 0x0000c1b3, 0x0000000f);
-	nv_icmd(dev, 0x0000c1b4, 0x0000000f);
-	nv_icmd(dev, 0x0000c1b5, 0x0000000f);
-	nv_icmd(dev, 0x0000c1b6, 0x0000000f);
-	nv_icmd(dev, 0x0000c1b7, 0x0000000f);
-	nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
-	nv_icmd(dev, 0x0000c1b9, 0x00fac688);
-	nv_icmd(dev, 0x0001e100, 0x00000001);
-	nv_icmd(dev, 0x00001000, 0x00000002);
-	nv_icmd(dev, 0x000006aa, 0x00000001);
-	nv_icmd(dev, 0x000006ad, 0x00000100);
-	nv_icmd(dev, 0x000006ae, 0x00000100);
-	nv_icmd(dev, 0x000006b1, 0x00000011);
-	nv_icmd(dev, 0x0000078c, 0x00000008);
-	nv_icmd(dev, 0x00000792, 0x00000001);
-	nv_icmd(dev, 0x00000794, 0x00000001);
-	nv_icmd(dev, 0x00000795, 0x00000001);
-	nv_icmd(dev, 0x00000796, 0x00000001);
-	nv_icmd(dev, 0x00000797, 0x000000cf);
-	nv_icmd(dev, 0x0000079a, 0x00000002);
-	nv_icmd(dev, 0x00000833, 0x04444480);
-	nv_icmd(dev, 0x000007a1, 0x00000001);
-	nv_icmd(dev, 0x000007a3, 0x00000001);
-	nv_icmd(dev, 0x000007a4, 0x00000001);
-	nv_icmd(dev, 0x000007a5, 0x00000001);
-	nv_icmd(dev, 0x00000831, 0x00000004);
-	nv_icmd(dev, 0x0001e100, 0x00000001);
-	nv_icmd(dev, 0x00001000, 0x00000014);
-	nv_icmd(dev, 0x00000351, 0x00000100);
-	nv_icmd(dev, 0x00000957, 0x00000003);
-	nv_icmd(dev, 0x0000095d, 0x00000001);
-	nv_icmd(dev, 0x0000082b, 0x00000004);
-	nv_icmd(dev, 0x00000942, 0x00010001);
-	nv_icmd(dev, 0x00000943, 0x00000001);
-	nv_icmd(dev, 0x000007c5, 0x00010001);
-	nv_icmd(dev, 0x00000834, 0x00000001);
-	nv_icmd(dev, 0x000007c7, 0x00000001);
-	nv_icmd(dev, 0x0001e100, 0x00000001);
-	nv_icmd(dev, 0x00001000, 0x00000001);
-	nv_icmd(dev, 0x0000080c, 0x00000002);
-	nv_icmd(dev, 0x0000080d, 0x00000100);
-	nv_icmd(dev, 0x0000080e, 0x00000100);
-	nv_icmd(dev, 0x0000080f, 0x00000001);
-	nv_icmd(dev, 0x00000823, 0x00000002);
-	nv_icmd(dev, 0x00000824, 0x00000100);
-	nv_icmd(dev, 0x00000825, 0x00000100);
-	nv_icmd(dev, 0x00000826, 0x00000001);
-	nv_icmd(dev, 0x0001e100, 0x00000001);
-	nv_wr32(dev, 0x400208, 0x00000000);
-	nv_wr32(dev, 0x404154, 0x00000400);
-
-	nvc0_grctx_generate_9097(dev);
-	if (fermi >= 0x9197)
-		nvc0_grctx_generate_9197(dev);
-	if (fermi >= 0x9297)
-		nvc0_grctx_generate_9297(dev);
-	nvc0_grctx_generate_902d(dev);
-	nvc0_grctx_generate_9039(dev);
-	nvc0_grctx_generate_90c0(dev);
-
-	nv_wr32(dev, 0x000260, r000260);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
deleted file mode 100644
index f5fac7cbb78d..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
-
-struct nvc0_instmem_priv {
-	struct nouveau_gpuobj  *bar1_pgd;
-	struct nouveau_channel *bar1;
-	struct nouveau_gpuobj  *bar3_pgd;
-	struct nouveau_channel *bar3;
-};
-
-int
-nvc0_instmem_suspend(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	dev_priv->ramin_available = false;
-	return 0;
-}
-
-void
-nvc0_instmem_resume(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
-
-	nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
-	nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
-	dev_priv->ramin_available = true;
-}
-
-static void
-nvc0_channel_del(struct nouveau_channel **pchan)
-{
-	struct nouveau_channel *chan;
-
-	chan = *pchan;
-	*pchan = NULL;
-	if (!chan)
-		return;
-
-	nouveau_vm_ref(NULL, &chan->vm, NULL);
-	if (drm_mm_initialized(&chan->ramin_heap))
-		drm_mm_takedown(&chan->ramin_heap);
-	nouveau_gpuobj_ref(NULL, &chan->ramin);
-	kfree(chan);
-}
-
-static int
-nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
-		 struct nouveau_channel **pchan,
-		 struct nouveau_gpuobj *pgd, u64 vm_size)
-{
-	struct nouveau_channel *chan;
-	int ret;
-
-	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
-	if (!chan)
-		return -ENOMEM;
-	chan->dev = dev;
-
-	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
-	if (ret) {
-		nvc0_channel_del(&chan);
-		return ret;
-	}
-
-	ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
-	if (ret) {
-		nvc0_channel_del(&chan);
-		return ret;
-	}
-
-	ret = nouveau_vm_ref(vm, &chan->vm, NULL);
-	if (ret) {
-		nvc0_channel_del(&chan);
-		return ret;
-	}
-
-	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
-	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
-	nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
-	nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
-
-	*pchan = chan;
-	return 0;
-}
-
-int
-nvc0_instmem_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct pci_dev *pdev = dev->pdev;
-	struct nvc0_instmem_priv *priv;
-	struct nouveau_vm *vm = NULL;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	pinstmem->priv = priv;
-
-	/* BAR3 VM */
-	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
-			     &dev_priv->bar3_vm);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL,
-				 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
-				 NVOBJ_FLAG_DONT_MAP |
-				 NVOBJ_FLAG_ZERO_ALLOC,
-				 &dev_priv->bar3_vm->pgt[0].obj[0]);
-	if (ret)
-		goto error;
-	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
-
-	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
-				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
-	if (ret)
-		goto error;
-
-	ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
-	if (ret)
-		goto error;
-	nouveau_vm_ref(NULL, &vm, NULL);
-
-	ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
-			       priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
-	if (ret)
-		goto error;
-
-	/* BAR1 VM */
-	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
-				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
-	if (ret)
-		goto error;
-
-	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
-	if (ret)
-		goto error;
-	nouveau_vm_ref(NULL, &vm, NULL);
-
-	ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
-			       priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
-	if (ret)
-		goto error;
-
-	/* channel vm */
-	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
-			     &dev_priv->chan_vm);
-	if (ret)
-		goto error;
-
-	nvc0_instmem_resume(dev);
-	return 0;
-error:
-	nvc0_instmem_takedown(dev);
-	return ret;
-}
-
-void
-nvc0_instmem_takedown(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_vm *vm = NULL;
-
-	nvc0_instmem_suspend(dev);
-
-	nv_wr32(dev, 0x1704, 0x00000000);
-	nv_wr32(dev, 0x1714, 0x00000000);
-
-	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
-
-	nvc0_channel_del(&priv->bar1);
-	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
-	nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
-
-	nvc0_channel_del(&priv->bar3);
-	nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
-	nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
-	nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
-	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
-	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
-
-	dev_priv->engine.instmem.priv = NULL;
-	kfree(priv);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 51cee2103544..0d34eb581179 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -22,18 +22,24 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
 
+#include <subdev/bios/pll.h>
+#include <subdev/bios.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
 static u32 read_div(struct drm_device *, int, u32, u32);
 static u32 read_pll(struct drm_device *, u32);
 
 static u32
 read_vco(struct drm_device *dev, u32 dsrc)
 {
-	u32 ssrc = nv_rd32(dev, dsrc);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ssrc = nv_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
 		return read_pll(dev, 0x00e800);
 	return read_pll(dev, 0x00e820);
@@ -42,8 +48,9 @@ read_vco(struct drm_device *dev, u32 dsrc)
 static u32
 read_pll(struct drm_device *dev, u32 pll)
 {
-	u32 ctrl = nv_rd32(dev, pll + 0);
-	u32 coef = nv_rd32(dev, pll + 4);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, pll + 0);
+	u32 coef = nv_rd32(device, pll + 4);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -83,8 +90,9 @@ read_pll(struct drm_device *dev, u32 pll)
 static u32
 read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(dev, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(dev, dctl + (doff * 4));
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nv_rd32(device, dctl + (doff * 4));
 
 	switch (ssrc & 0x00000003) {
 	case 0:
@@ -109,7 +117,8 @@ read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
 static u32
 read_mem(struct drm_device *dev)
 {
-	u32 ssel = nv_rd32(dev, 0x1373f0);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ssel = nv_rd32(device, 0x1373f0);
 	if (ssel & 0x00000001)
 		return read_div(dev, 0, 0x137300, 0x137310);
 	return read_pll(dev, 0x132000);
@@ -118,8 +127,9 @@ read_mem(struct drm_device *dev)
 static u32
 read_clk(struct drm_device *dev, int clk)
 {
-	u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4));
-	u32 ssel = nv_rd32(dev, 0x137100);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
+	u32 ssel = nv_rd32(device, 0x137100);
 	u32 sclk, sdiv;
 
 	if (ssel & (1 << clk)) {
@@ -212,10 +222,12 @@ calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
 static u32
 calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
 {
-	struct pll_lims limits;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll limits;
 	int N, M, P, ret;
 
-	ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
+	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
 	if (ret)
 		return 0;
 
@@ -308,31 +320,33 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
 static int
 calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
 {
-	struct pll_lims pll;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll pll;
 	int N, M, P, ret;
 	u32 ctrl;
 
 	/* mclk pll input freq comes from another pll, make sure it's on */
-	ctrl = nv_rd32(dev, 0x132020);
+	ctrl = nv_rd32(device, 0x132020);
 	if (!(ctrl & 0x00000001)) {
 		/* if not, program it to 567MHz.  nfi where this value comes
 		 * from - it looks like it's in the pll limits table for
 		 * 132000 but the binary driver ignores all my attempts to
 		 * change this value.
 		 */
-		nv_wr32(dev, 0x137320, 0x00000103);
-		nv_wr32(dev, 0x137330, 0x81200606);
-		nv_wait(dev, 0x132020, 0x00010000, 0x00010000);
-		nv_wr32(dev, 0x132024, 0x0001150f);
-		nv_mask(dev, 0x132020, 0x00000001, 0x00000001);
-		nv_wait(dev, 0x137390, 0x00020000, 0x00020000);
-		nv_mask(dev, 0x132020, 0x00000004, 0x00000004);
+		nv_wr32(device, 0x137320, 0x00000103);
+		nv_wr32(device, 0x137330, 0x81200606);
+		nv_wait(device, 0x132020, 0x00010000, 0x00010000);
+		nv_wr32(device, 0x132024, 0x0001150f);
+		nv_mask(device, 0x132020, 0x00000001, 0x00000001);
+		nv_wait(device, 0x137390, 0x00020000, 0x00020000);
+		nv_mask(device, 0x132020, 0x00000004, 0x00000004);
 	}
 
 	/* for the moment, until the clock tree is better understood, use
 	 * pll mode for all clock frequencies
 	 */
-	ret = get_pll_limits(dev, 0x132000, &pll);
+	ret = nvbios_pll_parse(bios, 0x132000, &pll);
 	if (ret == 0) {
 		pll.refclk = read_pll(dev, 0x132020);
 		if (pll.refclk) {
@@ -350,7 +364,7 @@ calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
 void *
 nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nvc0_pm_state *info;
 	int ret;
 
@@ -364,7 +378,7 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	 * are always the same freq with the binary driver even when the
 	 * performance table says they should differ.
 	 */
-	if (dev_priv->chipset == 0xd9)
+	if (device->chipset == 0xd9)
 		perflvl->rop = 0;
 
 	if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
@@ -394,38 +408,40 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 static void
 prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+
 	/* program dividers at 137160/1371d0 first */
 	if (clk < 7 && !info->ssel) {
-		nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
+		nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+		nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
 	}
 
 	/* switch clock to non-pll mode */
-	nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
+	nv_mask(device, 0x137100, (1 << clk), 0x00000000);
+	nv_wait(device, 0x137100, (1 << clk), 0x00000000);
 
 	/* reprogram pll */
 	if (clk < 7) {
 		/* make sure it's disabled first... */
 		u32 base = 0x137000 + (clk * 0x20);
-		u32 ctrl = nv_rd32(dev, base + 0x00);
+		u32 ctrl = nv_rd32(device, base + 0x00);
 		if (ctrl & 0x00000001) {
-			nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
-			nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
+			nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
+			nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
 		}
 		/* program it to new values, if necessary */
 		if (info->ssel) {
-			nv_wr32(dev, base + 0x04, info->coef);
-			nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
-			nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
-			nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
+			nv_wr32(device, base + 0x04, info->coef);
+			nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
+			nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
+			nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
 		}
 	}
 
 	/* select pll/non-pll mode, and program final clock divider */
-	nv_mask(dev, 0x137100, (1 << clk), info->ssel);
-	nv_wait(dev, 0x137100, (1 << clk), info->ssel);
-	nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+	nv_mask(device, 0x137100, (1 << clk), info->ssel);
+	nv_wait(device, 0x137100, (1 << clk), info->ssel);
+	nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
 }
 
 static void
@@ -441,7 +457,8 @@ mclk_refresh(struct nouveau_mem_exec_func *exec)
 static void
 mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
 {
-	nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
 }
 
 static void
@@ -458,83 +475,84 @@ mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
 static u32
 mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
 {
-	struct drm_device *dev = exec->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	if (pfb->ram.type != NV_MEM_TYPE_GDDR5) {
 		if (mr <= 1)
-			return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4));
-		return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4));
+			return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
+		return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
 	} else {
 		if (mr == 0)
-			return nv_rd32(dev, 0x10f300 + (mr * 4));
+			return nv_rd32(device, 0x10f300 + (mr * 4));
 		else
 		if (mr <= 7)
-			return nv_rd32(dev, 0x10f32c + (mr * 4));
-		return nv_rd32(dev, 0x10f34c);
+			return nv_rd32(device, 0x10f32c + (mr * 4));
+		return nv_rd32(device, 0x10f34c);
 	}
 }
 
 static void
 mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
 {
-	struct drm_device *dev = exec->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	if (pfb->ram.type != NV_MEM_TYPE_GDDR5) {
 		if (mr <= 1) {
-			nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data);
-			if (dev_priv->vram_rank_B)
-				nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data);
+			nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
+			if (pfb->ram.ranks > 1)
+				nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
 		} else
 		if (mr <= 3) {
-			nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data);
-			if (dev_priv->vram_rank_B)
-				nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data);
+			nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
+			if (pfb->ram.ranks > 1)
+				nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
 		}
 	} else {
-		if      (mr ==  0) nv_wr32(dev, 0x10f300 + (mr * 4), data);
-		else if (mr <=  7) nv_wr32(dev, 0x10f32c + (mr * 4), data);
-		else if (mr == 15) nv_wr32(dev, 0x10f34c, data);
+		if      (mr ==  0) nv_wr32(device, 0x10f300 + (mr * 4), data);
+		else if (mr <=  7) nv_wr32(device, 0x10f32c + (mr * 4), data);
+		else if (mr == 15) nv_wr32(device, 0x10f34c, data);
 	}
 }
 
 static void
 mclk_clock_set(struct nouveau_mem_exec_func *exec)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nvc0_pm_state *info = exec->priv;
-	struct drm_device *dev = exec->dev;
-	u32 ctrl = nv_rd32(dev, 0x132000);
+	u32 ctrl = nv_rd32(device, 0x132000);
 
-	nv_wr32(dev, 0x137360, 0x00000001);
-	nv_wr32(dev, 0x137370, 0x00000000);
-	nv_wr32(dev, 0x137380, 0x00000000);
+	nv_wr32(device, 0x137360, 0x00000001);
+	nv_wr32(device, 0x137370, 0x00000000);
+	nv_wr32(device, 0x137380, 0x00000000);
 	if (ctrl & 0x00000001)
-		nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001));
+		nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));
 
-	nv_wr32(dev, 0x132004, info->mem.coef);
-	nv_wr32(dev, 0x132000, (ctrl |= 0x00000001));
-	nv_wait(dev, 0x137390, 0x00000002, 0x00000002);
-	nv_wr32(dev, 0x132018, 0x00005000);
+	nv_wr32(device, 0x132004, info->mem.coef);
+	nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
+	nv_wait(device, 0x137390, 0x00000002, 0x00000002);
+	nv_wr32(device, 0x132018, 0x00005000);
 
-	nv_wr32(dev, 0x137370, 0x00000001);
-	nv_wr32(dev, 0x137380, 0x00000001);
-	nv_wr32(dev, 0x137360, 0x00000000);
+	nv_wr32(device, 0x137370, 0x00000001);
+	nv_wr32(device, 0x137380, 0x00000001);
+	nv_wr32(device, 0x137360, 0x00000000);
 }
 
 static void
 mclk_timing_set(struct nouveau_mem_exec_func *exec)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nvc0_pm_state *info = exec->priv;
 	struct nouveau_pm_level *perflvl = info->perflvl;
 	int i;
 
 	for (i = 0; i < 5; i++)
-		nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
+		nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
 }
 
 static void
 prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nouveau_mem_exec_func exec = {
 		.dev = dev,
 		.precharge = mclk_precharge,
@@ -549,17 +567,17 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
 		.priv = info
 	};
 
-	if (dev_priv->chipset < 0xd0)
-		nv_wr32(dev, 0x611200, 0x00003300);
+	if (device->chipset < 0xd0)
+		nv_wr32(device, 0x611200, 0x00003300);
 	else
-		nv_wr32(dev, 0x62c000, 0x03030000);
+		nv_wr32(device, 0x62c000, 0x03030000);
 
 	nouveau_mem_exec(&exec, info->perflvl);
 
-	if (dev_priv->chipset < 0xd0)
-		nv_wr32(dev, 0x611200, 0x00003330);
+	if (device->chipset < 0xd0)
+		nv_wr32(device, 0x611200, 0x00003330);
 	else
-		nv_wr32(dev, 0x62c000, 0x03030300);
+		nv_wr32(device, 0x62c000, 0x03030300);
 }
 int
 nvc0_pm_clocks_set(struct drm_device *dev, void *data)
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
deleted file mode 100644
index 940652e7fafa..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_software.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
-
-#include "nv50_display.h"
-
-struct nvc0_software_priv {
-	struct nouveau_software_priv base;
-};
-
-struct nvc0_software_chan {
-	struct nouveau_software_chan base;
-	struct nouveau_vma dispc_vma[4];
-};
-
-u64
-nvc0_software_crtc(struct nouveau_channel *chan, int crtc)
-{
-	struct nvc0_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	return pch->dispc_vma[crtc].offset;
-}
-
-static int
-nvc0_software_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
-	struct nvc0_software_chan *pch;
-	int ret = 0, i;
-
-	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
-	if (!pch)
-		return -ENOMEM;
-
-	nouveau_software_context_new(&pch->base);
-	chan->engctx[engine] = pch;
-
-	/* map display semaphore buffers into channel's vm */
-	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo;
-		if (dev_priv->card_type >= NV_D0)
-			bo = nvd0_display_crtc_sema(dev, i);
-		else
-			bo = nv50_display(dev)->crtc[i].sem.bo;
-
-		ret = nouveau_bo_vma_add(bo, chan->vm, &pch->dispc_vma[i]);
-	}
-
-	if (ret)
-		psw->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nvc0_software_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_software_chan *pch = chan->engctx[engine];
-	int i;
-
-	if (dev_priv->card_type >= NV_D0) {
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
-			nouveau_bo_vma_del(bo, &pch->dispc_vma[i]);
-		}
-	} else
-	if (dev_priv->card_type >= NV_50) {
-		struct nv50_display *disp = nv50_display(dev);
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nv50_display_crtc *dispc = &disp->crtc[i];
-			nouveau_bo_vma_del(dispc->sem.bo, &pch->dispc_vma[i]);
-		}
-	}
-
-	chan->engctx[engine] = NULL;
-	kfree(pch);
-}
-
-static int
-nvc0_software_object_new(struct nouveau_channel *chan, int engine,
-			 u32 handle, u16 class)
-{
-	return 0;
-}
-
-static int
-nvc0_software_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
-static int
-nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static void
-nvc0_software_destroy(struct drm_device *dev, int engine)
-{
-	struct nvc0_software_priv *psw = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, SW);
-	kfree(psw);
-}
-
-int
-nvc0_software_create(struct drm_device *dev)
-{
-	struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
-	if (!psw)
-		return -ENOMEM;
-
-	psw->base.base.destroy = nvc0_software_destroy;
-	psw->base.base.init = nvc0_software_init;
-	psw->base.base.fini = nvc0_software_fini;
-	psw->base.base.context_new = nvc0_software_context_new;
-	psw->base.base.context_del = nvc0_software_context_del;
-	psw->base.base.object_new = nvc0_software_object_new;
-	nouveau_software_create(&psw->base);
-
-	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
-	NVOBJ_CLASS(dev, 0x906e, SW);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
deleted file mode 100644
index 4d62a1d95782..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-
-/* 0 = unsupported
- * 1 = non-compressed
- * 3 = compressed
- */
-static const u8 types[256] = {
-	1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
-	0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
-	3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
-	3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
-	3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
-	3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
-	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
-};
-
-bool
-nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
-{
-	u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
-	return likely((types[memtype] == 1));
-}
-
-int
-nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
-	      u32 type, struct nouveau_mem **pmem)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
-	struct nouveau_mm_node *r;
-	struct nouveau_mem *mem;
-	int ret;
-
-	size  >>= 12;
-	align >>= 12;
-	ncmin >>= 12;
-
-	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&mem->regions);
-	mem->dev = dev_priv->dev;
-	mem->memtype = (type & 0xff);
-	mem->size = size;
-
-	mutex_lock(&mm->mutex);
-	do {
-		ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
-		if (ret) {
-			mutex_unlock(&mm->mutex);
-			nv50_vram_del(dev, &mem);
-			return ret;
-		}
-
-		list_add_tail(&r->rl_entry, &mem->regions);
-		size -= r->length;
-	} while (size);
-	mutex_unlock(&mm->mutex);
-
-	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
-	*pmem = mem;
-	return 0;
-}
-
-int
-nvc0_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 parts = nv_rd32(dev, 0x022438);
-	u32 pmask = nv_rd32(dev, 0x022554);
-	u32 bsize = nv_rd32(dev, 0x10f20c);
-	u32 offset, length;
-	bool uniform = true;
-	int ret, part;
-
-	NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
-	NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
-	dev_priv->vram_type = nouveau_mem_vbios_type(dev);
-	dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004);
-
-	/* read amount of vram attached to each memory controller */
-	for (part = 0; part < parts; part++) {
-		if (!(pmask & (1 << part))) {
-			u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000));
-			if (psize != bsize) {
-				if (psize < bsize)
-					bsize = psize;
-				uniform = false;
-			}
-
-			NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
-			dev_priv->vram_size += (u64)psize << 20;
-		}
-	}
-
-	/* if all controllers have the same amount attached, there's no holes */
-	if (uniform) {
-		offset = rsvd_head;
-		length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
-		return nouveau_mm_init(&vram->mm, offset, length, 1);
-	}
-
-	/* otherwise, address lowest common amount from 0GiB */
-	ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1);
-	if (ret)
-		return ret;
-
-	/* and the rest starting from (8GiB + common_size) */
-	offset = (0x0200000000ULL >> 12) + (bsize << 8);
-	length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail;
-
-	ret = nouveau_mm_init(&vram->mm, offset, length, 0);
-	if (ret) {
-		nouveau_mm_fini(&vram->mm);
-		return ret;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 4b44a3250d4b..c402fca2b2b8 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -27,15 +27,21 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_gem.h"
 #include "nouveau_connector.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
-#include "nouveau_dma.h"
-#include "nouveau_fb.h"
-#include "nouveau_software.h"
+#include "nouveau_fence.h"
 #include "nv50_display.h"
 
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
 #define EVO_DMA_NR 9
 
 #define EVO_MASTER  (0x00)
@@ -72,8 +78,7 @@ struct nvd0_display {
 static struct nvd0_display *
 nvd0_display(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	return dev_priv->engine.display.priv;
+	return nouveau_display(dev)->priv;
 }
 
 static struct drm_crtc *
@@ -88,55 +93,47 @@ nvd0_display_crtc_get(struct drm_encoder *encoder)
 static inline int
 evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	int ret = 0;
-	nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
-	nv_wr32(dev, 0x610704 + (id * 0x10), data);
-	nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
-	if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
+	nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
+	nv_wr32(device, 0x610704 + (id * 0x10), data);
+	nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
+	if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
 		ret = -EBUSY;
-	nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
+	nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
 	return ret;
 }
 
 static u32 *
 evo_wait(struct drm_device *dev, int id, int nr)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
-	u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;
+	u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
 
 	if (put + nr >= (PAGE_SIZE / 4)) {
 		disp->evo[id].ptr[put] = 0x20000000;
 
-		nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
-		if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
-			NV_ERROR(dev, "evo %d dma stalled\n", id);
+		nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
+		if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
+			NV_ERROR(drm, "evo %d dma stalled\n", id);
 			return NULL;
 		}
 
 		put = 0;
 	}
 
-	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-		NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
-
 	return disp->evo[id].ptr + put;
 }
 
 static void
 evo_kick(u32 *push, struct drm_device *dev, int id)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
 
-	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
-		u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
-		u32 *cur = disp->evo[id].ptr + curp;
-
-		while (cur < push)
-			NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
-		NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
-	}
-
-	nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
+	nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
 }
 
 #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
@@ -145,6 +142,8 @@ evo_kick(u32 *push, struct drm_device *dev, int id)
 static int
 evo_init_dma(struct drm_device *dev, int ch)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
 	u32 flags;
 
@@ -152,68 +151,76 @@ evo_init_dma(struct drm_device *dev, int ch)
 	if (ch == EVO_MASTER)
 		flags |= 0x01000000;
 
-	nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
-	nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
-	nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
-	nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
-	if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
-		NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
-			      nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+	nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
+	nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
+	nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+	nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
+	nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
+	if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
+		NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
+			      nv_rd32(device, 0x610490 + (ch * 0x0010)));
 		return -EBUSY;
 	}
 
-	nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
-	nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+	nv_mask(device, 0x610090, (1 << ch), (1 << ch));
+	nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
 	return 0;
 }
 
 static void
 evo_fini_dma(struct drm_device *dev, int ch)
 {
-	if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
 		return;
 
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
-	nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
-	nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
-	nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
+	nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
+	nv_mask(device, 0x610090, (1 << ch), 0x00000000);
+	nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
 }
 
 static inline void
 evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
 {
-	nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
 }
 
 static int
 evo_init_pio(struct drm_device *dev, int ch)
 {
-	nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
-	if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
-		NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
-			      nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
+	if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
+		NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
+			      nv_rd32(device, 0x610490 + (ch * 0x0010)));
 		return -EBUSY;
 	}
 
-	nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
-	nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+	nv_mask(device, 0x610090, (1 << ch), (1 << ch));
+	nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
 	return 0;
 }
 
 static void
 evo_fini_pio(struct drm_device *dev, int ch)
 {
-	if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
 		return;
 
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
-	nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
-	nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
-	nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
+	nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
+	nv_mask(device, 0x610090, (1 << ch), 0x00000000);
+	nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
 }
 
 static bool
@@ -225,6 +232,7 @@ evo_sync_wait(void *data)
 static int
 evo_sync(struct drm_device *dev, int ch)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
 	u32 *push = evo_wait(dev, ch, 8);
 	if (push) {
@@ -235,7 +243,7 @@ evo_sync(struct drm_device *dev, int ch)
 		evo_data(push, 0x00000000);
 		evo_data(push, 0x00000000);
 		evo_kick(push, dev, ch);
-		if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
+		if (nv_wait_cb(device, evo_sync_wait, disp->sync))
 			return 0;
 	}
 
@@ -300,7 +308,7 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			return ret;
 
 
-		offset  = nvc0_software_crtc(chan, nv_crtc->index);
+		offset  = nvc0_fence_crtc(chan, nv_crtc->index);
 		offset += evo->sem.offset;
 
 		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
@@ -363,7 +371,7 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 static int
 nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
-	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
 	struct drm_device *dev = nv_crtc->base.dev;
 	struct nouveau_connector *nv_connector;
 	struct drm_connector *connector;
@@ -386,7 +394,7 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 		mode |= nv_connector->dithering_depth;
 	}
 
-	if (dev_priv->card_type < NV_E0)
+	if (nv_device(drm->device)->card_type < NV_E0)
 		mthd = 0x0490 + (nv_crtc->index * 0x0300);
 	else
 		mthd = 0x04a0 + (nv_crtc->index * 0x0300);
@@ -701,11 +709,12 @@ static int
 nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			struct drm_framebuffer *old_fb)
 {
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	int ret;
 
 	if (!crtc->fb) {
-		NV_DEBUG_KMS(crtc->dev, "No FB bound\n");
+		NV_DEBUG(drm, "No FB bound\n");
 		return 0;
 	}
 
@@ -923,6 +932,7 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int or = nv_encoder->or;
 	u32 dpms_ctrl;
 
@@ -932,9 +942,9 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
 	if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
 		dpms_ctrl |= 0x00000004;
 
-	nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
-	nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
+	nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
+	nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
+	nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
 }
 
 static bool
@@ -1025,18 +1035,19 @@ nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 	enum drm_connector_status status = connector_status_disconnected;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int or = nv_encoder->or;
 	u32 load;
 
-	nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000);
+	nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
 	udelay(9500);
-	nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000);
+	nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
 
-	load = nv_rd32(dev, 0x61a00c + (or * 0x800));
+	load = nv_rd32(device, 0x61a00c + (or * 0x800));
 	if ((load & 0x38000000) == 0x38000000)
 		status = connector_status_connected;
 
-	nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000);
+	nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
 	return status;
 }
 
@@ -1063,7 +1074,7 @@ static const struct drm_encoder_funcs nvd0_dac_func = {
 };
 
 static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
+nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct drm_device *dev = connector->dev;
 	struct nouveau_encoder *nv_encoder;
@@ -1094,24 +1105,25 @@ nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_connector *nv_connector;
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int i, or = nv_encoder->or * 0x30;
 
 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
 	if (!drm_detect_monitor_audio(nv_connector->edid))
 		return;
 
-	nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
+	nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
 
 	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
 	if (nv_connector->base.eld[0]) {
 		u8 *eld = nv_connector->base.eld;
 
 		for (i = 0; i < eld[2] * 4; i++)
-			nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
+			nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
 		for (i = eld[2] * 4; i < 0x60; i++)
-			nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
+			nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
 
-		nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
+		nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
 	}
 }
 
@@ -1120,9 +1132,10 @@ nvd0_audio_disconnect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int or = nv_encoder->or * 0x30;
 
-	nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
+	nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
 }
 
 /******************************************************************************
@@ -1135,6 +1148,7 @@ nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int head = nv_crtc->index * 0x800;
 	u32 rekey = 56; /* binary driver, and tegra constant */
 	u32 max_ac_packet;
@@ -1149,25 +1163,25 @@ nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
 	max_ac_packet /= 32;
 
 	/* AVI InfoFrame */
-	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x61671c + head, 0x000d0282);
-	nv_wr32(dev, 0x616720 + head, 0x0000006f);
-	nv_wr32(dev, 0x616724 + head, 0x00000000);
-	nv_wr32(dev, 0x616728 + head, 0x00000000);
-	nv_wr32(dev, 0x61672c + head, 0x00000000);
-	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
+	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
+	nv_wr32(device, 0x61671c + head, 0x000d0282);
+	nv_wr32(device, 0x616720 + head, 0x0000006f);
+	nv_wr32(device, 0x616724 + head, 0x00000000);
+	nv_wr32(device, 0x616728 + head, 0x00000000);
+	nv_wr32(device, 0x61672c + head, 0x00000000);
+	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
 
 	/* ??? InfoFrame? */
-	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x6167ac + head, 0x00000010);
-	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
+	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
+	nv_wr32(device, 0x6167ac + head, 0x00000010);
+	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
 
 	/* HDMI_CTRL */
-	nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
+	nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
 						  max_ac_packet << 16);
 
 	/* NFI, audio doesn't work without it though.. */
-	nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
+	nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
 
 	nvd0_audio_mode_set(encoder, mode);
 }
@@ -1178,37 +1192,41 @@ nvd0_hdmi_disconnect(struct drm_encoder *encoder)
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int head = nv_crtc->index * 0x800;
 
 	nvd0_audio_disconnect(encoder);
 
-	nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
-	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
-	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+	nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
+	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
+	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
 }
 
 /******************************************************************************
  * SOR
  *****************************************************************************/
 static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
+nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
 {
 	static const u8 nvd0[] = { 16, 8, 0, 24 };
 	return nvd0[lane];
 }
 
 static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
+nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	const u32 loff = (or * 0x800) + (link * 0x80);
-	nv_mask(dev, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+	nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
 }
 
 static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
+nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
 		      u8 lane, u8 swing, u8 preem)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	const u32 loff = (or * 0x800) + (link * 0x80);
 	u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
@@ -1236,25 +1254,26 @@ nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
 	}
 
 	if (!config) {
-		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
 		return;
 	}
 
-	nv_mask(dev, 0x61c118 + loff, mask, config[1] << shift);
-	nv_mask(dev, 0x61c120 + loff, mask, config[2] << shift);
-	nv_mask(dev, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
-	nv_mask(dev, 0x61c13c + loff, 0x00000000, 0x00000000);
+	nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
+	nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
+	nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
+	nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
 }
 
 static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
+nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
 		     int link_nr, u32 link_bw, bool enhframe)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	const u32 loff = (or * 0x800) + (link * 0x80);
 	const u32 soff = (or * 0x800);
-	u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & ~0x001f4000;
-	u32 clksor = nv_rd32(dev, 0x612300 + soff) & ~0x007c0000;
+	u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
+	u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
 	u32 script = 0x0000, lane_mask = 0;
 	u8 *table, *entry;
 	int i;
@@ -1284,20 +1303,21 @@ nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
 	for (i = 0; i < link_nr; i++)
 		lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
 
-	nv_wr32(dev, 0x612300 + soff, clksor);
-	nv_wr32(dev, 0x61c10c + loff, dpctrl);
-	nv_mask(dev, 0x61c130 + loff, 0x0000000f, lane_mask);
+	nv_wr32(device, 0x612300 + soff, clksor);
+	nv_wr32(device, 0x61c10c + loff, dpctrl);
+	nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
 }
 
 static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
+nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
 		     u32 *link_nr, u32 *link_bw)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	const u32 loff = (or * 0x800) + (link * 0x80);
 	const u32 soff = (or * 0x800);
-	u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & 0x000f0000;
-	u32 clksor = nv_rd32(dev, 0x612300 + soff);
+	u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
+	u32 clksor = nv_rd32(device, 0x612300 + soff);
 
 	if      (dpctrl > 0x00030000) *link_nr = 4;
 	else if (dpctrl > 0x00010000) *link_nr = 2;
@@ -1308,9 +1328,10 @@ nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
 }
 
 static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
+nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
 		    u32 crtc, u32 datarate)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	const u32 symbol = 100000;
 	const u32 TU = 64;
 	u32 link_nr, link_bw;
@@ -1330,7 +1351,7 @@ nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
 	value += 5;
 	value |= 0x08000000;
 
-	nv_wr32(dev, 0x616610 + (crtc * 0x800), value);
+	nv_wr32(device, 0x616610 + (crtc * 0x800), value);
 }
 
 static void
@@ -1338,6 +1359,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct drm_encoder *partner;
 	int or = nv_encoder->or;
 	u32 dpms_ctrl;
@@ -1361,12 +1383,12 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
 	dpms_ctrl  = (mode == DRM_MODE_DPMS_ON);
 	dpms_ctrl |= 0x80000000;
 
-	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
-	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
+	nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
+	nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
+	nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
+	nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
 
-	if (nv_encoder->dcb->type == OUTPUT_DP) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		struct dp_train_func func = {
 			.link_set = nvd0_sor_dp_link_set,
 			.train_set = nvd0_sor_dp_train_set,
@@ -1427,7 +1449,7 @@ static void
 nvd0_sor_prepare(struct drm_encoder *encoder)
 {
 	nvd0_sor_disconnect(encoder);
-	if (nouveau_encoder(encoder)->dcb->type == OUTPUT_DP)
+	if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
 		evo_sync(encoder->dev, EVO_MASTER);
 }
 
@@ -1441,11 +1463,11 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 		  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nvbios *bios = &drm->vbios;
 	u32 mode_ctrl = (1 << nv_crtc->index);
 	u32 syncs, magic, *push;
 	u32 or_config;
@@ -1462,7 +1484,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
 	switch (nv_encoder->dcb->type) {
-	case OUTPUT_TMDS:
+	case DCB_OUTPUT_TMDS:
 		if (nv_encoder->dcb->sorconf.link & 1) {
 			if (mode->clock < 165000)
 				mode_ctrl |= 0x00000100;
@@ -1478,7 +1500,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 		nvd0_hdmi_mode_set(encoder, mode);
 		break;
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		or_config = (mode_ctrl & 0x00000f00) >> 8;
 		if (bios->fp_no_ddc) {
 			if (bios->fp.dual_link)
@@ -1507,7 +1529,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 		}
 		break;
-	case OUTPUT_DP:
+	case DCB_OUTPUT_DP:
 		if (nv_connector->base.display_info.bpc == 6) {
 			nv_encoder->dp.datarate = mode->clock * 18 / 8;
 			syncs |= 0x00000002 << 6;
@@ -1530,7 +1552,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 	nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
 
-	if (nv_encoder->dcb->type == OUTPUT_DP) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
 					 nv_encoder->dp.datarate);
 	}
@@ -1571,7 +1593,7 @@ static const struct drm_encoder_funcs nvd0_sor_func = {
 };
 
 static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
+nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct drm_device *dev = connector->dev;
 	struct nouveau_encoder *nv_encoder;
@@ -1597,50 +1619,51 @@ nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
 /******************************************************************************
  * IRQ
  *****************************************************************************/
-static struct dcb_entry *
+static struct dcb_output *
 lookup_dcb(struct drm_device *dev, int id, u32 mc)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int type, or, i, link = -1;
 
 	if (id < 4) {
-		type = OUTPUT_ANALOG;
+		type = DCB_OUTPUT_ANALOG;
 		or   = id;
 	} else {
 		switch (mc & 0x00000f00) {
-		case 0x00000000: link = 0; type = OUTPUT_LVDS; break;
-		case 0x00000100: link = 0; type = OUTPUT_TMDS; break;
-		case 0x00000200: link = 1; type = OUTPUT_TMDS; break;
-		case 0x00000500: link = 0; type = OUTPUT_TMDS; break;
-		case 0x00000800: link = 0; type = OUTPUT_DP; break;
-		case 0x00000900: link = 1; type = OUTPUT_DP; break;
+		case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
+		case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
+		case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
+		case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
+		case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
+		case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
 		default:
-			NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc);
+			NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
 			return NULL;
 		}
 
 		or = id - 4;
 	}
 
-	for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
-		struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
+	for (i = 0; i < drm->vbios.dcb.entries; i++) {
+		struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
 		if (dcb->type == type && (dcb->or & (1 << or)) &&
 		    (link < 0 || link == !(dcb->sorconf.link & 1)))
 			return dcb;
 	}
 
-	NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
+	NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
 	return NULL;
 }
 
 static void
 nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
 {
-	struct dcb_entry *dcb;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct dcb_output *dcb;
 	int i;
 
 	for (i = 0; mask && i < 8; i++) {
-		u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
+		u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
 		if (!(mcc & (1 << crtc)))
 			continue;
 
@@ -1651,20 +1674,22 @@ nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
 		nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
 	}
 
-	nv_wr32(dev, 0x6101d4, 0x00000000);
-	nv_wr32(dev, 0x6109d4, 0x00000000);
-	nv_wr32(dev, 0x6101d0, 0x80000000);
+	nv_wr32(device, 0x6101d4, 0x00000000);
+	nv_wr32(device, 0x6109d4, 0x00000000);
+	nv_wr32(device, 0x6101d0, 0x80000000);
 }
 
 static void
 nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
 {
-	struct dcb_entry *dcb;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_output *dcb;
 	u32 or, tmp, pclk;
 	int i;
 
 	for (i = 0; mask && i < 8; i++) {
-		u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
+		u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
 		if (!(mcc & (1 << crtc)))
 			continue;
 
@@ -1675,16 +1700,16 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
 		nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
 	}
 
-	pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
-	NV_DEBUG_KMS(dev, "PDISP: crtc %d pclk %d mask 0x%08x\n",
+	pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
+	NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
 			  crtc, pclk, mask);
 	if (pclk && (mask & 0x00010000)) {
 		nv50_crtc_set_clock(dev, crtc, pclk);
 	}
 
 	for (i = 0; mask && i < 8; i++) {
-		u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
-		u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
+		u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
+		u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
 		if (!(mcp & (1 << crtc)))
 			continue;
 
@@ -1695,20 +1720,20 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
 
 		nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
 
-		nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000);
+		nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
 		switch (dcb->type) {
-		case OUTPUT_ANALOG:
-			nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000);
+		case DCB_OUTPUT_ANALOG:
+			nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
 			break;
-		case OUTPUT_TMDS:
-		case OUTPUT_LVDS:
-		case OUTPUT_DP:
+		case DCB_OUTPUT_TMDS:
+		case DCB_OUTPUT_LVDS:
+		case DCB_OUTPUT_DP:
 			if (cfg & 0x00000100)
 				tmp = 0x00000101;
 			else
 				tmp = 0x00000000;
 
-			nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp);
+			nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
 			break;
 		default:
 			break;
@@ -1717,22 +1742,23 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
 		break;
 	}
 
-	nv_wr32(dev, 0x6101d4, 0x00000000);
-	nv_wr32(dev, 0x6109d4, 0x00000000);
-	nv_wr32(dev, 0x6101d0, 0x80000000);
+	nv_wr32(device, 0x6101d4, 0x00000000);
+	nv_wr32(device, 0x6109d4, 0x00000000);
+	nv_wr32(device, 0x6101d0, 0x80000000);
 }
 
 static void
 nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
 {
-	struct dcb_entry *dcb;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct dcb_output *dcb;
 	int pclk, i;
 
-	pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
+	pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
 
 	for (i = 0; mask && i < 8; i++) {
-		u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
-		u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
+		u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
+		u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
 		if (!(mcp & (1 << crtc)))
 			continue;
 
@@ -1743,34 +1769,36 @@ nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
 		nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
 	}
 
-	nv_wr32(dev, 0x6101d4, 0x00000000);
-	nv_wr32(dev, 0x6109d4, 0x00000000);
-	nv_wr32(dev, 0x6101d0, 0x80000000);
+	nv_wr32(device, 0x6101d4, 0x00000000);
+	nv_wr32(device, 0x6109d4, 0x00000000);
+	nv_wr32(device, 0x6101d0, 0x80000000);
 }
 
 static void
 nvd0_display_bh(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *)data;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
 	u32 mask = 0, crtc = ~0;
 	int i;
 
 	if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
-		NV_INFO(dev, "PDISP: modeset req %d\n", disp->modeset);
-		NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n",
-			 nv_rd32(dev, 0x6101d0),
-			 nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4));
+		NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
+		NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
+			 nv_rd32(device, 0x6101d0),
+			 nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
 		for (i = 0; i < 8; i++) {
-			NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n",
+			NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
 				i < 4 ? "DAC" : "SOR", i,
-				nv_rd32(dev, 0x640180 + (i * 0x20)),
-				nv_rd32(dev, 0x660180 + (i * 0x20)));
+				nv_rd32(device, 0x640180 + (i * 0x20)),
+				nv_rd32(device, 0x660180 + (i * 0x20)));
 		}
 	}
 
 	while (!mask && ++crtc < dev->mode_config.num_crtc)
-		mask = nv_rd32(dev, 0x6101d4 + (crtc * 0x800));
+		mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
 
 	if (disp->modeset & 0x00000001)
 		nvd0_display_unk1_handler(dev, crtc, mask);
@@ -1780,67 +1808,60 @@ nvd0_display_bh(unsigned long data)
 		nvd0_display_unk4_handler(dev, crtc, mask);
 }
 
-static void
+void
 nvd0_display_intr(struct drm_device *dev)
 {
 	struct nvd0_display *disp = nvd0_display(dev);
-	u32 intr = nv_rd32(dev, 0x610088);
-	int i;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 intr = nv_rd32(device, 0x610088);
 
 	if (intr & 0x00000001) {
-		u32 stat = nv_rd32(dev, 0x61008c);
-		nv_wr32(dev, 0x61008c, stat);
+		u32 stat = nv_rd32(device, 0x61008c);
+		nv_wr32(device, 0x61008c, stat);
 		intr &= ~0x00000001;
 	}
 
 	if (intr & 0x00000002) {
-		u32 stat = nv_rd32(dev, 0x61009c);
+		u32 stat = nv_rd32(device, 0x61009c);
 		int chid = ffs(stat) - 1;
 		if (chid >= 0) {
-			u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12));
-			u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12));
-			u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12));
+			u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
+			u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
+			u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
 
-			NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
+			NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
 				     "0x%08x 0x%08x\n",
 				chid, (mthd & 0x0000ffc), data, mthd, unkn);
-			nv_wr32(dev, 0x61009c, (1 << chid));
-			nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000);
+			nv_wr32(device, 0x61009c, (1 << chid));
+			nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
 		}
 
 		intr &= ~0x00000002;
 	}
 
 	if (intr & 0x00100000) {
-		u32 stat = nv_rd32(dev, 0x6100ac);
+		u32 stat = nv_rd32(device, 0x6100ac);
 
 		if (stat & 0x00000007) {
 			disp->modeset = stat;
 			tasklet_schedule(&disp->tasklet);
 
-			nv_wr32(dev, 0x6100ac, (stat & 0x00000007));
+			nv_wr32(device, 0x6100ac, (stat & 0x00000007));
 			stat &= ~0x00000007;
 		}
 
 		if (stat) {
-			NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat);
-			nv_wr32(dev, 0x6100ac, stat);
+			NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
+			nv_wr32(device, 0x6100ac, stat);
 		}
 
 		intr &= ~0x00100000;
 	}
 
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		u32 mask = 0x01000000 << i;
-		if (intr & mask) {
-			u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800));
-			nv_wr32(dev, 0x6100bc + (i * 0x800), stat);
-			intr &= ~mask;
-		}
-	}
-
+	intr &= ~0x0f000000; /* vblank, handled in core */
 	if (intr)
-		NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
+		NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
 }
 
 /******************************************************************************
@@ -1867,15 +1888,17 @@ int
 nvd0_display_init(struct drm_device *dev)
 {
 	struct nvd0_display *disp = nvd0_display(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int ret, i;
 	u32 *push;
 
-	if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
-		nv_wr32(dev, 0x6100ac, 0x00000100);
-		nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
-		if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
-			NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
-				 nv_rd32(dev, 0x6194e8));
+	if (nv_rd32(device, 0x6100ac) & 0x00000100) {
+		nv_wr32(device, 0x6100ac, 0x00000100);
+		nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
+			NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
+				 nv_rd32(device, 0x6194e8));
 			return -EBUSY;
 		}
 	}
@@ -1884,27 +1907,27 @@ nvd0_display_init(struct drm_device *dev)
 	 * work at all unless you do the SOR part below.
 	 */
 	for (i = 0; i < 3; i++) {
-		u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800));
-		nv_wr32(dev, 0x6101c0 + (i * 0x800), dac);
+		u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
+		nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
 	}
 
 	for (i = 0; i < 4; i++) {
-		u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800));
-		nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
+		u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
+		nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
 	}
 
 	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
-		u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
-		u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
-		nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0);
-		nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1);
-		nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2);
+		u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
+		u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
+		u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
+		nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
+		nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
+		nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
 	}
 
 	/* point at our hash table / objects, enable interrupts */
-	nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);
-	nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
+	nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
+	nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
 
 	/* init master */
 	ret = evo_init_dma(dev, EVO_MASTER);
@@ -1944,7 +1967,6 @@ error:
 void
 nvd0_display_destroy(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvd0_display *disp = nvd0_display(dev);
 	struct pci_dev *pdev = dev->pdev;
 	int i;
@@ -1957,31 +1979,36 @@ nvd0_display_destroy(struct drm_device *dev)
 	nouveau_gpuobj_ref(NULL, &disp->mem);
 	nouveau_bo_unmap(disp->sync);
 	nouveau_bo_ref(NULL, &disp->sync);
-	nouveau_irq_unregister(dev, 26);
 
-	dev_priv->engine.display.priv = NULL;
+	nouveau_display(dev)->priv = NULL;
 	kfree(disp);
 }
 
 int
 nvd0_display_create(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_bar *bar = nouveau_bar(device);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct dcb_table *dcb = &drm->vbios.dcb;
 	struct drm_connector *connector, *tmp;
 	struct pci_dev *pdev = dev->pdev;
 	struct nvd0_display *disp;
-	struct dcb_entry *dcbe;
+	struct dcb_output *dcbe;
 	int crtcs, ret, i;
 
 	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
 	if (!disp)
 		return -ENOMEM;
-	dev_priv->engine.display.priv = disp;
+
+	nouveau_display(dev)->priv = disp;
+	nouveau_display(dev)->dtor = nvd0_display_destroy;
+	nouveau_display(dev)->init = nvd0_display_init;
+	nouveau_display(dev)->fini = nvd0_display_fini;
 
 	/* create crtc objects to represent the hw heads */
-	crtcs = nv_rd32(dev, 0x022448);
+	crtcs = nv_rd32(device, 0x022448);
 	for (i = 0; i < crtcs; i++) {
 		ret = nvd0_crtc_create(dev, i);
 		if (ret)
@@ -1995,22 +2022,22 @@ nvd0_display_create(struct drm_device *dev)
 			continue;
 
 		if (dcbe->location != DCB_LOC_ON_CHIP) {
-			NV_WARN(dev, "skipping off-chip encoder %d/%d\n",
+			NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
 				dcbe->type, ffs(dcbe->or) - 1);
 			continue;
 		}
 
 		switch (dcbe->type) {
-		case OUTPUT_TMDS:
-		case OUTPUT_LVDS:
-		case OUTPUT_DP:
+		case DCB_OUTPUT_TMDS:
+		case DCB_OUTPUT_LVDS:
+		case DCB_OUTPUT_DP:
 			nvd0_sor_create(connector, dcbe);
 			break;
-		case OUTPUT_ANALOG:
+		case DCB_OUTPUT_ANALOG:
 			nvd0_dac_create(connector, dcbe);
 			break;
 		default:
-			NV_WARN(dev, "skipping unsupported encoder %d/%d\n",
+			NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
 				dcbe->type, ffs(dcbe->or) - 1);
 			continue;
 		}
@@ -2021,14 +2048,13 @@ nvd0_display_create(struct drm_device *dev)
 		if (connector->encoder_ids[0])
 			continue;
 
-		NV_WARN(dev, "%s has no encoders, removing\n",
+		NV_WARN(drm, "%s has no encoders, removing\n",
 			drm_get_connector_name(connector));
 		connector->funcs->destroy(connector);
 	}
 
 	/* setup interrupt handling */
 	tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
-	nouveau_irq_register(dev, 26, nvd0_display_intr);
 
 	/* small shared memory area we use for notifiers and semaphores */
 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2045,7 +2071,7 @@ nvd0_display_create(struct drm_device *dev)
 		goto out;
 
 	/* hash table and dma objects for the memory areas we care about */
-	ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
+	ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
 	if (ret)
 		goto out;
@@ -2077,7 +2103,7 @@ nvd0_display_create(struct drm_device *dev)
 
 		nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
 		nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
+		nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
 		nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
 		nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
 		nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
@@ -2087,7 +2113,7 @@ nvd0_display_create(struct drm_device *dev)
 
 		nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
 		nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
+		nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
 		nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
 		nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
 		nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
@@ -2097,7 +2123,7 @@ nvd0_display_create(struct drm_device *dev)
 
 		nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
 		nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
-		nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
+		nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
 		nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
 		nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
 		nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
@@ -2106,7 +2132,7 @@ nvd0_display_create(struct drm_device *dev)
 						((dmao + 0x60) << 9));
 	}
 
-	pinstmem->flush(dev);
+	bar->flush(bar);
 
 out:
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
deleted file mode 100644
index 0eba15b2201a..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_fifo.h"
-
-#define NVE0_FIFO_ENGINE_NUM 32
-
-static void nve0_fifo_isr(struct drm_device *);
-
-struct nve0_fifo_engine {
-	struct nouveau_gpuobj *playlist[2];
-	int cur_playlist;
-};
-
-struct nve0_fifo_priv {
-	struct nouveau_fifo_priv base;
-	struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
-	struct {
-		struct nouveau_gpuobj *mem;
-		struct nouveau_vma bar;
-	} user;
-	int spoon_nr;
-};
-
-struct nve0_fifo_chan {
-	struct nouveau_fifo_chan base;
-	u32 engine;
-};
-
-static void
-nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct nve0_fifo_engine *peng = &priv->engine[engine];
-	struct nouveau_gpuobj *cur;
-	u32 match = (engine << 16) | 0x00000001;
-	int ret, i, p;
-
-	cur = peng->playlist[peng->cur_playlist];
-	if (unlikely(cur == NULL)) {
-		ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
-		if (ret) {
-			NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
-			return;
-		}
-
-		peng->playlist[peng->cur_playlist] = cur;
-	}
-
-	peng->cur_playlist = !peng->cur_playlist;
-
-	for (i = 0, p = 0; i < priv->base.channels; i++) {
-		u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
-		if (ctrl != match)
-			continue;
-		nv_wo32(cur, p + 0, i);
-		nv_wo32(cur, p + 4, 0x00000000);
-		p += 8;
-	}
-	pinstmem->flush(dev);
-
-	nv_wr32(dev, 0x002270, cur->vinst >> 12);
-	nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
-	if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
-		NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
-}
-
-static int
-nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
-	struct nve0_fifo_chan *fctx;
-	u64 usermem = priv->user.mem->vinst + chan->id * 512;
-	u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
-	int ret = 0, i;
-
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-
-	fctx->engine = 0; /* PGRAPH */
-
-	/* allocate vram for control regs, map into polling area */
-	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
-				priv->user.bar.offset + (chan->id * 512), 512);
-	if (!chan->user) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	for (i = 0; i < 0x100; i += 4)
-		nv_wo32(chan->ramin, i, 0x00000000);
-	nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
-	nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
-	nv_wo32(chan->ramin, 0x10, 0x0000face);
-	nv_wo32(chan->ramin, 0x30, 0xfffff902);
-	nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
-	nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
-				     upper_32_bits(ib_virt));
-	nv_wo32(chan->ramin, 0x84, 0x20400000);
-	nv_wo32(chan->ramin, 0x94, 0x30000001);
-	nv_wo32(chan->ramin, 0x9c, 0x00000100);
-	nv_wo32(chan->ramin, 0xac, 0x0000001f);
-	nv_wo32(chan->ramin, 0xe4, 0x00000000);
-	nv_wo32(chan->ramin, 0xe8, chan->id);
-	nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
-	pinstmem->flush(dev);
-
-	nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
-						(chan->ramin->vinst >> 12));
-	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
-	nve0_fifo_playlist_update(dev, fctx->engine);
-	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
-
-error:
-	if (ret)
-		priv->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nve0_fifo_chan *fctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-
-	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
-	nv_wr32(dev, 0x002634, chan->id);
-	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
-		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-	nve0_fifo_playlist_update(dev, fctx->engine);
-	nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
-
-	if (chan->user) {
-		iounmap(chan->user);
-		chan->user = NULL;
-	}
-
-	chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
-	kfree(fctx);
-}
-
-static int
-nve0_fifo_init(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
-	struct nve0_fifo_chan *fctx;
-	int i;
-
-	/* reset PFIFO, enable all available PSUBFIFO areas */
-	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(dev, 0x000204, 0xffffffff);
-
-	priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
-	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
-
-	/* PSUBFIFO[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
-	}
-
-	nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
-
-	nv_wr32(dev, 0x002a00, 0xffffffff);
-	nv_wr32(dev, 0x002100, 0xffffffff);
-	nv_wr32(dev, 0x002140, 0xbfffffff);
-
-	/* restore PFIFO context table */
-	for (i = 0; i < priv->base.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-		if (!chan || !(fctx = chan->engctx[engine]))
-			continue;
-
-		nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
-						 (chan->ramin->vinst >> 12));
-		nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
-		nve0_fifo_playlist_update(dev, fctx->engine);
-		nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
-	}
-
-	return 0;
-}
-
-static int
-nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
-	int i;
-
-	for (i = 0; i < priv->base.channels; i++) {
-		if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
-			continue;
-
-		nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
-		nv_wr32(dev, 0x002634, i);
-		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
-			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
-				i, nv_rd32(dev, 0x002634));
-			return -EBUSY;
-		}
-	}
-
-	nv_wr32(dev, 0x002140, 0x00000000);
-	return 0;
-}
-
-struct nouveau_enum nve0_fifo_fault_unit[] = {
-	{}
-};
-
-struct nouveau_enum nve0_fifo_fault_reason[] = {
-	{ 0x00, "PT_NOT_PRESENT" },
-	{ 0x01, "PT_TOO_SHORT" },
-	{ 0x02, "PAGE_NOT_PRESENT" },
-	{ 0x03, "VM_LIMIT_EXCEEDED" },
-	{ 0x04, "NO_CHANNEL" },
-	{ 0x05, "PAGE_SYSTEM_ONLY" },
-	{ 0x06, "PAGE_READ_ONLY" },
-	{ 0x0a, "COMPRESSED_SYSRAM" },
-	{ 0x0c, "INVALID_STORAGE_TYPE" },
-	{}
-};
-
-struct nouveau_enum nve0_fifo_fault_hubclient[] = {
-	{}
-};
-
-struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
-	{}
-};
-
-struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
-	{ 0x00200000, "ILLEGAL_MTHD" },
-	{ 0x00800000, "EMPTY_SUBC" },
-	{}
-};
-
-static void
-nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
-{
-	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
-	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
-	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
-	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
-	u32 client = (stat & 0x00001f00) >> 8;
-
-	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
-		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
-	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
-	printk("] from ");
-	nouveau_enum_print(nve0_fifo_fault_unit, unit);
-	if (stat & 0x00000040) {
-		printk("/");
-		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
-	} else {
-		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
-		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
-	}
-	printk(" on channel 0x%010llx\n", (u64)inst << 12);
-}
-
-static int
-nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
-{
-	struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (likely(chid >= 0 && chid < priv->base.channels)) {
-		chan = dev_priv->channels.ptr[chid];
-		if (likely(chan))
-			ret = nouveau_finish_page_flip(chan, NULL);
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return ret;
-}
-
-static void
-nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
-{
-	u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
-	u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
-	u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
-	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
-	u32 subc = (addr & 0x00070000);
-	u32 mthd = (addr & 0x00003ffc);
-	u32 show = stat;
-
-	if (stat & 0x00200000) {
-		if (mthd == 0x0054) {
-			if (!nve0_fifo_page_flip(dev, chid))
-				show &= ~0x00200000;
-		}
-	}
-
-	if (show) {
-		NV_INFO(dev, "PFIFO%d:", unit);
-		nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
-		NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
-			unit, chid, subc, mthd, data);
-	}
-
-	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
-	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
-}
-
-static void
-nve0_fifo_isr(struct drm_device *dev)
-{
-	u32 mask = nv_rd32(dev, 0x002140);
-	u32 stat = nv_rd32(dev, 0x002100) & mask;
-
-	if (stat & 0x00000100) {
-		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
-		nv_wr32(dev, 0x002100, 0x00000100);
-		stat &= ~0x00000100;
-	}
-
-	if (stat & 0x10000000) {
-		u32 units = nv_rd32(dev, 0x00259c);
-		u32 u = units;
-
-		while (u) {
-			int i = ffs(u) - 1;
-			nve0_fifo_isr_vm_fault(dev, i);
-			u &= ~(1 << i);
-		}
-
-		nv_wr32(dev, 0x00259c, units);
-		stat &= ~0x10000000;
-	}
-
-	if (stat & 0x20000000) {
-		u32 units = nv_rd32(dev, 0x0025a0);
-		u32 u = units;
-
-		while (u) {
-			int i = ffs(u) - 1;
-			nve0_fifo_isr_subfifo_intr(dev, i);
-			u &= ~(1 << i);
-		}
-
-		nv_wr32(dev, 0x0025a0, units);
-		stat &= ~0x20000000;
-	}
-
-	if (stat & 0x40000000) {
-		NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
-		nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
-		stat &= ~0x40000000;
-	}
-
-	if (stat) {
-		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
-		nv_wr32(dev, 0x002100, stat);
-		nv_wr32(dev, 0x002140, 0);
-	}
-}
-
-static void
-nve0_fifo_destroy(struct drm_device *dev, int engine)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
-	int i;
-
-	nouveau_vm_put(&priv->user.bar);
-	nouveau_gpuobj_ref(NULL, &priv->user.mem);
-
-	for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
-		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
-		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
-	}
-
-	dev_priv->eng[engine] = NULL;
-	kfree(priv);
-}
-
-int
-nve0_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nve0_fifo_priv *priv;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.base.destroy = nve0_fifo_destroy;
-	priv->base.base.init = nve0_fifo_init;
-	priv->base.base.fini = nve0_fifo_fini;
-	priv->base.base.context_new = nve0_fifo_context_new;
-	priv->base.base.context_del = nve0_fifo_context_del;
-	priv->base.channels = 4096;
-	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
-	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
-	if (ret)
-		goto error;
-
-	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
-			     12, NV_MEM_ACCESS_RW, &priv->user.bar);
-	if (ret)
-		goto error;
-
-	nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
-
-	nouveau_irq_register(dev, 8, nve0_fifo_isr);
-error:
-	if (ret)
-		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
-	return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
deleted file mode 100644
index b784a8b32458..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_graph.c
+++ /dev/null
@@ -1,831 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <linux/module.h>
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_fifo.h"
-
-#include "nve0_graph.h"
-
-static void
-nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
-{
-	NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
-		nv_rd32(dev, base + 0x400));
-	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
-		nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
-	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
-		nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
-}
-
-static void
-nve0_graph_ctxctl_debug(struct drm_device *dev)
-{
-	u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
-	u32 gpc;
-
-	nve0_graph_ctxctl_debug_unit(dev, 0x409000);
-	for (gpc = 0; gpc < gpcnr; gpc++)
-		nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
-}
-
-static int
-nve0_graph_load_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, 0x409840, 0x00000030);
-	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
-	nv_wr32(dev, 0x409504, 0x00000003);
-	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
-		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
-
-	return 0;
-}
-
-static int
-nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
-{
-	nv_wr32(dev, 0x409840, 0x00000003);
-	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
-	nv_wr32(dev, 0x409504, 0x00000009);
-	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
-		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nve0_graph_construct_context(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
-	struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	int ret, i;
-	u32 *ctx;
-
-	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-
-	nve0_graph_load_context(chan);
-
-	nv_wo32(grch->grctx, 0x1c, 1);
-	nv_wo32(grch->grctx, 0x20, 0);
-	nv_wo32(grch->grctx, 0x28, 0);
-	nv_wo32(grch->grctx, 0x2c, 0);
-	dev_priv->engine.instmem.flush(dev);
-
-	ret = nve0_grctx_generate(chan);
-	if (ret)
-		goto err;
-
-	ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
-	if (ret)
-		goto err;
-
-	for (i = 0; i < priv->grctx_size; i += 4)
-		ctx[i / 4] = nv_ro32(grch->grctx, i);
-
-	priv->grctx_vals = ctx;
-	return 0;
-
-err:
-	kfree(ctx);
-	return ret;
-}
-
-static int
-nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
-{
-	struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
-	struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	u32 magic[GPC_MAX][2];
-	u16 offset = 0x0000;
-	int gpc;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
-				 &grch->unk408004);
-	if (ret)
-		return ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
-				 &grch->unk40800c);
-	if (ret)
-		return ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
-				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
-				 &grch->unk418810);
-	if (ret)
-		return ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
-				 &grch->mmio);
-	if (ret)
-		return ret;
-
-#define mmio(r,v) do {                                                         \
-	nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r));                     \
-	nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v));                     \
-	grch->mmio_nr++;                                                       \
-} while (0)
-	mmio(0x40800c, grch->unk40800c->linst >> 8);
-	mmio(0x408010, 0x80000000);
-	mmio(0x419004, grch->unk40800c->linst >> 8);
-	mmio(0x419008, 0x00000000);
-	mmio(0x4064cc, 0x80000000);
-	mmio(0x408004, grch->unk408004->linst >> 8);
-	mmio(0x408008, 0x80000030);
-	mmio(0x418808, grch->unk408004->linst >> 8);
-	mmio(0x41880c, 0x80000030);
-	mmio(0x4064c8, 0x01800600);
-	mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
-	mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
-	mmio(0x405830, 0x02180648);
-	mmio(0x4064c4, 0x0192ffff);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
-		u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
-		magic[gpc][0]  = 0x10000000 | (magic0 << 16) | offset;
-		magic[gpc][1]  = 0x00000000 | (magic1 << 16);
-		offset += 0x0324 * priv->tpc_nr[gpc];
-	}
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
-		mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
-		offset += 0x07ff * priv->tpc_nr[gpc];
-	}
-
-	mmio(0x17e91c, 0x06060609);
-	mmio(0x17e920, 0x00090a05);
-#undef mmio
-	return 0;
-}
-
-static int
-nve0_graph_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nve0_graph_priv *priv = nv_engine(dev, engine);
-	struct nve0_graph_chan *grch;
-	struct nouveau_gpuobj *grctx;
-	int ret, i;
-
-	grch = kzalloc(sizeof(*grch), GFP_KERNEL);
-	if (!grch)
-		return -ENOMEM;
-	chan->engctx[NVOBJ_ENGINE_GR] = grch;
-
-	ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
-				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
-				 &grch->grctx);
-	if (ret)
-		goto error;
-	grctx = grch->grctx;
-
-	ret = nve0_graph_create_context_mmio_list(chan);
-	if (ret)
-		goto error;
-
-	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
-	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
-	pinstmem->flush(dev);
-
-	if (!priv->grctx_vals) {
-		ret = nve0_graph_construct_context(chan);
-		if (ret)
-			goto error;
-	}
-
-	for (i = 0; i < priv->grctx_size; i += 4)
-		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
-	nv_wo32(grctx, 0xf4, 0);
-	nv_wo32(grctx, 0xf8, 0);
-	nv_wo32(grctx, 0x10, grch->mmio_nr);
-	nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
-	nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
-	nv_wo32(grctx, 0x1c, 1);
-	nv_wo32(grctx, 0x20, 0);
-	nv_wo32(grctx, 0x28, 0);
-	nv_wo32(grctx, 0x2c, 0);
-
-	pinstmem->flush(dev);
-	return 0;
-
-error:
-	priv->base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nve0_graph_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nve0_graph_chan *grch = chan->engctx[engine];
-
-	nouveau_gpuobj_ref(NULL, &grch->mmio);
-	nouveau_gpuobj_ref(NULL, &grch->unk418810);
-	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
-	nouveau_gpuobj_ref(NULL, &grch->unk408004);
-	nouveau_gpuobj_ref(NULL, &grch->grctx);
-	chan->engctx[engine] = NULL;
-}
-
-static int
-nve0_graph_object_new(struct nouveau_channel *chan, int engine,
-		      u32 handle, u16 class)
-{
-	return 0;
-}
-
-static int
-nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static void
-nve0_graph_init_obj418880(struct drm_device *dev)
-{
-	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	int i;
-
-	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
-	for (i = 0; i < 4; i++)
-		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
-	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
-	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
-}
-
-static void
-nve0_graph_init_regs(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x400080, 0x003083c2);
-	nv_wr32(dev, 0x400088, 0x0001ffe7);
-	nv_wr32(dev, 0x40008c, 0x00000000);
-	nv_wr32(dev, 0x400090, 0x00000030);
-	nv_wr32(dev, 0x40013c, 0x003901f7);
-	nv_wr32(dev, 0x400140, 0x00000100);
-	nv_wr32(dev, 0x400144, 0x00000000);
-	nv_wr32(dev, 0x400148, 0x00000110);
-	nv_wr32(dev, 0x400138, 0x00000000);
-	nv_wr32(dev, 0x400130, 0x00000000);
-	nv_wr32(dev, 0x400134, 0x00000000);
-	nv_wr32(dev, 0x400124, 0x00000002);
-}
-
-static void
-nve0_graph_init_units(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x409ffc, 0x00000000);
-	nv_wr32(dev, 0x409c14, 0x00003e3e);
-	nv_wr32(dev, 0x409c24, 0x000f0000);
-
-	nv_wr32(dev, 0x404000, 0xc0000000);
-	nv_wr32(dev, 0x404600, 0xc0000000);
-	nv_wr32(dev, 0x408030, 0xc0000000);
-	nv_wr32(dev, 0x404490, 0xc0000000);
-	nv_wr32(dev, 0x406018, 0xc0000000);
-	nv_wr32(dev, 0x407020, 0xc0000000);
-	nv_wr32(dev, 0x405840, 0xc0000000);
-	nv_wr32(dev, 0x405844, 0x00ffffff);
-
-	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
-	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
-
-}
-
-static void
-nve0_graph_init_gpc_0(struct drm_device *dev)
-{
-	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
-	u32 data[TPC_MAX / 8];
-	u8  tpcnr[GPC_MAX];
-	int i, gpc, tpc;
-
-	nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
-
-	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
-						  priv->tpc_nr[gpc]);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
-	nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
-}
-
-static void
-nve0_graph_init_gpc_1(struct drm_device *dev)
-{
-	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	int gpc, tpc;
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
-		}
-		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
-	}
-}
-
-static void
-nve0_graph_init_rop(struct drm_device *dev)
-{
-	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	int rop;
-
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
-	}
-}
-
-static void
-nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
-		    struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
-{
-	int i;
-
-	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
-	for (i = 0; i < data->size / 4; i++)
-		nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
-
-	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
-	for (i = 0; i < code->size / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
-		nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
-	}
-}
-
-static int
-nve0_graph_init_ctxctl(struct drm_device *dev)
-{
-	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	u32 r000260;
-
-	/* load fuc microcode */
-	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
-	nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
-	nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
-	nv_wr32(dev, 0x000260, r000260);
-
-	/* start both of them running */
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x41a10c, 0x00000000);
-	nv_wr32(dev, 0x40910c, 0x00000000);
-	nv_wr32(dev, 0x41a100, 0x00000002);
-	nv_wr32(dev, 0x409100, 0x00000002);
-	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
-		NV_INFO(dev, "0x409800 wait failed\n");
-
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x409500, 0x7fffffff);
-	nv_wr32(dev, 0x409504, 0x00000021);
-
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x409500, 0x00000000);
-	nv_wr32(dev, 0x409504, 0x00000010);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
-		return -EBUSY;
-	}
-	priv->grctx_size = nv_rd32(dev, 0x409800);
-
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x409500, 0x00000000);
-	nv_wr32(dev, 0x409504, 0x00000016);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
-		return -EBUSY;
-	}
-
-	nv_wr32(dev, 0x409840, 0xffffffff);
-	nv_wr32(dev, 0x409500, 0x00000000);
-	nv_wr32(dev, 0x409504, 0x00000025);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
-		return -EBUSY;
-	}
-
-	nv_wr32(dev, 0x409800, 0x00000000);
-	nv_wr32(dev, 0x409500, 0x00000001);
-	nv_wr32(dev, 0x409504, 0x00000030);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
-		return -EBUSY;
-	}
-
-	nv_wr32(dev, 0x409810, 0xb00095c8);
-	nv_wr32(dev, 0x409800, 0x00000000);
-	nv_wr32(dev, 0x409500, 0x00000001);
-	nv_wr32(dev, 0x409504, 0x00000031);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
-		return -EBUSY;
-	}
-
-	nv_wr32(dev, 0x409810, 0x00080420);
-	nv_wr32(dev, 0x409800, 0x00000000);
-	nv_wr32(dev, 0x409500, 0x00000001);
-	nv_wr32(dev, 0x409504, 0x00000032);
-	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
-		NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
-		return -EBUSY;
-	}
-
-	nv_wr32(dev, 0x409614, 0x00000070);
-	nv_wr32(dev, 0x409614, 0x00000770);
-	nv_wr32(dev, 0x40802c, 0x00000001);
-	return 0;
-}
-
-static int
-nve0_graph_init(struct drm_device *dev, int engine)
-{
-	int ret;
-
-	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
-	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
-
-	nve0_graph_init_obj418880(dev);
-	nve0_graph_init_regs(dev);
-	nve0_graph_init_gpc_0(dev);
-
-	nv_wr32(dev, 0x400500, 0x00010001);
-	nv_wr32(dev, 0x400100, 0xffffffff);
-	nv_wr32(dev, 0x40013c, 0xffffffff);
-
-	nve0_graph_init_units(dev);
-	nve0_graph_init_gpc_1(dev);
-	nve0_graph_init_rop(dev);
-
-	nv_wr32(dev, 0x400108, 0xffffffff);
-	nv_wr32(dev, 0x400138, 0xffffffff);
-	nv_wr32(dev, 0x400118, 0xffffffff);
-	nv_wr32(dev, 0x400130, 0xffffffff);
-	nv_wr32(dev, 0x40011c, 0xffffffff);
-	nv_wr32(dev, 0x400134, 0xffffffff);
-	nv_wr32(dev, 0x400054, 0x34ce3464);
-
-	ret = nve0_graph_init_ctxctl(dev);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-int
-nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
-{
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < pfifo->channels; i++) {
-		chan = dev_priv->channels.ptr[i];
-		if (!chan || !chan->ramin)
-			continue;
-
-		if (inst == chan->ramin->vinst)
-			break;
-	}
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return i;
-}
-
-static void
-nve0_graph_ctxctl_isr(struct drm_device *dev)
-{
-	u32 ustat = nv_rd32(dev, 0x409c18);
-
-	if (ustat & 0x00000001)
-		NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
-	if (ustat & 0x00080000)
-		NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
-	if (ustat & ~0x00080001)
-		NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
-
-	nve0_graph_ctxctl_debug(dev);
-	nv_wr32(dev, 0x409c20, ustat);
-}
-
-static void
-nve0_graph_trap_isr(struct drm_device *dev, int chid)
-{
-	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
-	u32 trap = nv_rd32(dev, 0x400108);
-	int rop;
-
-	if (trap & 0x00000001) {
-		u32 stat = nv_rd32(dev, 0x404000);
-		NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
-		nv_wr32(dev, 0x404000, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x00000001);
-		trap &= ~0x00000001;
-	}
-
-	if (trap & 0x00000010) {
-		u32 stat = nv_rd32(dev, 0x405840);
-		NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
-		nv_wr32(dev, 0x405840, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x00000010);
-		trap &= ~0x00000010;
-	}
-
-	if (trap & 0x02000000) {
-		for (rop = 0; rop < priv->rop_nr; rop++) {
-			u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
-			u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
-			NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
-				     rop, chid, statz, statc);
-			nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
-			nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
-		}
-		nv_wr32(dev, 0x400108, 0x02000000);
-		trap &= ~0x02000000;
-	}
-
-	if (trap) {
-		NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
-		nv_wr32(dev, 0x400108, trap);
-	}
-}
-
-static void
-nve0_graph_isr(struct drm_device *dev)
-{
-	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
-	u32 chid = nve0_graph_isr_chid(dev, inst);
-	u32 stat = nv_rd32(dev, 0x400100);
-	u32 addr = nv_rd32(dev, 0x400704);
-	u32 mthd = (addr & 0x00003ffc);
-	u32 subc = (addr & 0x00070000) >> 16;
-	u32 data = nv_rd32(dev, 0x400708);
-	u32 code = nv_rd32(dev, 0x400110);
-	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
-
-	if (stat & 0x00000010) {
-		if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
-			NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
-				     "subc %d class 0x%04x mthd 0x%04x "
-				     "data 0x%08x\n",
-				chid, inst, subc, class, mthd, data);
-		}
-		nv_wr32(dev, 0x400100, 0x00000010);
-		stat &= ~0x00000010;
-	}
-
-	if (stat & 0x00000020) {
-		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
-			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
-			chid, inst, subc, class, mthd, data);
-		nv_wr32(dev, 0x400100, 0x00000020);
-		stat &= ~0x00000020;
-	}
-
-	if (stat & 0x00100000) {
-		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
-		nouveau_enum_print(nv50_data_error_names, code);
-		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
-		       "mthd 0x%04x data 0x%08x\n",
-		       chid, inst, subc, class, mthd, data);
-		nv_wr32(dev, 0x400100, 0x00100000);
-		stat &= ~0x00100000;
-	}
-
-	if (stat & 0x00200000) {
-		nve0_graph_trap_isr(dev, chid);
-		nv_wr32(dev, 0x400100, 0x00200000);
-		stat &= ~0x00200000;
-	}
-
-	if (stat & 0x00080000) {
-		nve0_graph_ctxctl_isr(dev);
-		nv_wr32(dev, 0x400100, 0x00080000);
-		stat &= ~0x00080000;
-	}
-
-	if (stat) {
-		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
-		nv_wr32(dev, 0x400100, stat);
-	}
-
-	nv_wr32(dev, 0x400500, 0x00010001);
-}
-
-static int
-nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
-		     struct nve0_graph_fuc *fuc)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	const struct firmware *fw;
-	char f[32];
-	int ret;
-
-	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
-	ret = request_firmware(&fw, f, &dev->pdev->dev);
-	if (ret)
-		return ret;
-
-	fuc->size = fw->size;
-	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-	release_firmware(fw);
-	return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-static void
-nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
-{
-	if (fuc->data) {
-		kfree(fuc->data);
-		fuc->data = NULL;
-	}
-}
-
-static void
-nve0_graph_destroy(struct drm_device *dev, int engine)
-{
-	struct nve0_graph_priv *priv = nv_engine(dev, engine);
-
-	nve0_graph_destroy_fw(&priv->fuc409c);
-	nve0_graph_destroy_fw(&priv->fuc409d);
-	nve0_graph_destroy_fw(&priv->fuc41ac);
-	nve0_graph_destroy_fw(&priv->fuc41ad);
-
-	nouveau_irq_unregister(dev, 12);
-
-	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
-	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
-
-	if (priv->grctx_vals)
-		kfree(priv->grctx_vals);
-
-	NVOBJ_ENGINE_DEL(dev, GR);
-	kfree(priv);
-}
-
-int
-nve0_graph_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nve0_graph_priv *priv;
-	int ret, gpc, i;
-	u32 kepler;
-
-	kepler = nve0_graph_class(dev);
-	if (!kepler) {
-		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
-		return 0;
-	}
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->base.destroy = nve0_graph_destroy;
-	priv->base.init = nve0_graph_init;
-	priv->base.fini = nve0_graph_fini;
-	priv->base.context_new = nve0_graph_context_new;
-	priv->base.context_del = nve0_graph_context_del;
-	priv->base.object_new = nve0_graph_object_new;
-
-	NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
-	nouveau_irq_register(dev, 12, nve0_graph_isr);
-
-	NV_INFO(dev, "PGRAPH: using external firmware\n");
-	if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
-	    nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
-	    nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
-	    nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
-		ret = 0;
-		goto error;
-	}
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
-	if (ret)
-		goto error;
-
-	for (i = 0; i < 0x1000; i += 4) {
-		nv_wo32(priv->unk4188b4, i, 0x00000010);
-		nv_wo32(priv->unk4188b8, i, 0x00000010);
-	}
-
-	priv->gpc_nr  =  nv_rd32(dev, 0x409604) & 0x0000001f;
-	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
-		priv->tpc_total += priv->tpc_nr[gpc];
-	}
-
-	switch (dev_priv->chipset) {
-	case 0xe4:
-		if (priv->tpc_total == 8)
-			priv->magic_not_rop_nr = 3;
-		else
-		if (priv->tpc_total == 7)
-			priv->magic_not_rop_nr = 1;
-		break;
-	case 0xe7:
-		priv->magic_not_rop_nr = 1;
-		break;
-	default:
-		break;
-	}
-
-	if (!priv->magic_not_rop_nr) {
-		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
-			 priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
-			 priv->tpc_nr[3], priv->rop_nr);
-		priv->magic_not_rop_nr = 0x00;
-	}
-
-	NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
-	NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
-	NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
-	NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
-	NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
-	return 0;
-
-error:
-	nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
-	return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.h b/drivers/gpu/drm/nouveau/nve0_graph.h
deleted file mode 100644
index 2ba70449ba01..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_graph.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NVE0_GRAPH_H__
-#define __NVE0_GRAPH_H__
-
-#define GPC_MAX 4
-#define TPC_MAX 32
-
-#define ROP_BCAST(r)     (0x408800 + (r))
-#define ROP_UNIT(u, r)   (0x410000 + (u) * 0x400 + (r))
-#define GPC_BCAST(r)     (0x418000 + (r))
-#define GPC_UNIT(t, r)   (0x500000 + (t) * 0x8000 + (r))
-#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
-
-struct nve0_graph_fuc {
-	u32 *data;
-	u32  size;
-};
-
-struct nve0_graph_priv {
-	struct nouveau_exec_engine base;
-
-	struct nve0_graph_fuc fuc409c;
-	struct nve0_graph_fuc fuc409d;
-	struct nve0_graph_fuc fuc41ac;
-	struct nve0_graph_fuc fuc41ad;
-
-	u8 gpc_nr;
-	u8 rop_nr;
-	u8 tpc_nr[GPC_MAX];
-	u8 tpc_total;
-
-	u32  grctx_size;
-	u32 *grctx_vals;
-	struct nouveau_gpuobj *unk4188b4;
-	struct nouveau_gpuobj *unk4188b8;
-
-	u8 magic_not_rop_nr;
-};
-
-struct nve0_graph_chan {
-	struct nouveau_gpuobj *grctx;
-	struct nouveau_gpuobj *unk408004; /* 0x418810 too */
-	struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
-	struct nouveau_gpuobj *unk418810; /* 0x419848 too */
-	struct nouveau_gpuobj *mmio;
-	int mmio_nr;
-};
-
-int nve0_grctx_generate(struct nouveau_channel *);
-
-/* nve0_graph.c uses this also to determine supported chipsets */
-static inline u32
-nve0_graph_class(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	switch (dev_priv->chipset) {
-	case 0xe4:
-	case 0xe7:
-		return 0xa097;
-	default:
-		return 0;
-	}
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nve0_grctx.c b/drivers/gpu/drm/nouveau/nve0_grctx.c
deleted file mode 100644
index d3a802987972..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_grctx.c
+++ /dev/null
@@ -1,2777 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nve0_graph.h"
-
-static void
-nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
-{
-	nv_wr32(dev, 0x400204, data);
-	nv_wr32(dev, 0x400200, icmd);
-	while (nv_rd32(dev, 0x400700) & 0x00000002) {}
-}
-
-static void
-nve0_grctx_generate_icmd(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x400208, 0x80000000);
-	nv_icmd(dev, 0x001000, 0x00000004);
-	nv_icmd(dev, 0x000039, 0x00000000);
-	nv_icmd(dev, 0x00003a, 0x00000000);
-	nv_icmd(dev, 0x00003b, 0x00000000);
-	nv_icmd(dev, 0x0000a9, 0x0000ffff);
-	nv_icmd(dev, 0x000038, 0x0fac6881);
-	nv_icmd(dev, 0x00003d, 0x00000001);
-	nv_icmd(dev, 0x0000e8, 0x00000400);
-	nv_icmd(dev, 0x0000e9, 0x00000400);
-	nv_icmd(dev, 0x0000ea, 0x00000400);
-	nv_icmd(dev, 0x0000eb, 0x00000400);
-	nv_icmd(dev, 0x0000ec, 0x00000400);
-	nv_icmd(dev, 0x0000ed, 0x00000400);
-	nv_icmd(dev, 0x0000ee, 0x00000400);
-	nv_icmd(dev, 0x0000ef, 0x00000400);
-	nv_icmd(dev, 0x000078, 0x00000300);
-	nv_icmd(dev, 0x000079, 0x00000300);
-	nv_icmd(dev, 0x00007a, 0x00000300);
-	nv_icmd(dev, 0x00007b, 0x00000300);
-	nv_icmd(dev, 0x00007c, 0x00000300);
-	nv_icmd(dev, 0x00007d, 0x00000300);
-	nv_icmd(dev, 0x00007e, 0x00000300);
-	nv_icmd(dev, 0x00007f, 0x00000300);
-	nv_icmd(dev, 0x000050, 0x00000011);
-	nv_icmd(dev, 0x000058, 0x00000008);
-	nv_icmd(dev, 0x000059, 0x00000008);
-	nv_icmd(dev, 0x00005a, 0x00000008);
-	nv_icmd(dev, 0x00005b, 0x00000008);
-	nv_icmd(dev, 0x00005c, 0x00000008);
-	nv_icmd(dev, 0x00005d, 0x00000008);
-	nv_icmd(dev, 0x00005e, 0x00000008);
-	nv_icmd(dev, 0x00005f, 0x00000008);
-	nv_icmd(dev, 0x000208, 0x00000001);
-	nv_icmd(dev, 0x000209, 0x00000001);
-	nv_icmd(dev, 0x00020a, 0x00000001);
-	nv_icmd(dev, 0x00020b, 0x00000001);
-	nv_icmd(dev, 0x00020c, 0x00000001);
-	nv_icmd(dev, 0x00020d, 0x00000001);
-	nv_icmd(dev, 0x00020e, 0x00000001);
-	nv_icmd(dev, 0x00020f, 0x00000001);
-	nv_icmd(dev, 0x000081, 0x00000001);
-	nv_icmd(dev, 0x000085, 0x00000004);
-	nv_icmd(dev, 0x000088, 0x00000400);
-	nv_icmd(dev, 0x000090, 0x00000300);
-	nv_icmd(dev, 0x000098, 0x00001001);
-	nv_icmd(dev, 0x0000e3, 0x00000001);
-	nv_icmd(dev, 0x0000da, 0x00000001);
-	nv_icmd(dev, 0x0000f8, 0x00000003);
-	nv_icmd(dev, 0x0000fa, 0x00000001);
-	nv_icmd(dev, 0x00009f, 0x0000ffff);
-	nv_icmd(dev, 0x0000a0, 0x0000ffff);
-	nv_icmd(dev, 0x0000a1, 0x0000ffff);
-	nv_icmd(dev, 0x0000a2, 0x0000ffff);
-	nv_icmd(dev, 0x0000b1, 0x00000001);
-	nv_icmd(dev, 0x0000ad, 0x0000013e);
-	nv_icmd(dev, 0x0000e1, 0x00000010);
-	nv_icmd(dev, 0x000290, 0x00000000);
-	nv_icmd(dev, 0x000291, 0x00000000);
-	nv_icmd(dev, 0x000292, 0x00000000);
-	nv_icmd(dev, 0x000293, 0x00000000);
-	nv_icmd(dev, 0x000294, 0x00000000);
-	nv_icmd(dev, 0x000295, 0x00000000);
-	nv_icmd(dev, 0x000296, 0x00000000);
-	nv_icmd(dev, 0x000297, 0x00000000);
-	nv_icmd(dev, 0x000298, 0x00000000);
-	nv_icmd(dev, 0x000299, 0x00000000);
-	nv_icmd(dev, 0x00029a, 0x00000000);
-	nv_icmd(dev, 0x00029b, 0x00000000);
-	nv_icmd(dev, 0x00029c, 0x00000000);
-	nv_icmd(dev, 0x00029d, 0x00000000);
-	nv_icmd(dev, 0x00029e, 0x00000000);
-	nv_icmd(dev, 0x00029f, 0x00000000);
-	nv_icmd(dev, 0x0003b0, 0x00000000);
-	nv_icmd(dev, 0x0003b1, 0x00000000);
-	nv_icmd(dev, 0x0003b2, 0x00000000);
-	nv_icmd(dev, 0x0003b3, 0x00000000);
-	nv_icmd(dev, 0x0003b4, 0x00000000);
-	nv_icmd(dev, 0x0003b5, 0x00000000);
-	nv_icmd(dev, 0x0003b6, 0x00000000);
-	nv_icmd(dev, 0x0003b7, 0x00000000);
-	nv_icmd(dev, 0x0003b8, 0x00000000);
-	nv_icmd(dev, 0x0003b9, 0x00000000);
-	nv_icmd(dev, 0x0003ba, 0x00000000);
-	nv_icmd(dev, 0x0003bb, 0x00000000);
-	nv_icmd(dev, 0x0003bc, 0x00000000);
-	nv_icmd(dev, 0x0003bd, 0x00000000);
-	nv_icmd(dev, 0x0003be, 0x00000000);
-	nv_icmd(dev, 0x0003bf, 0x00000000);
-	nv_icmd(dev, 0x0002a0, 0x00000000);
-	nv_icmd(dev, 0x0002a1, 0x00000000);
-	nv_icmd(dev, 0x0002a2, 0x00000000);
-	nv_icmd(dev, 0x0002a3, 0x00000000);
-	nv_icmd(dev, 0x0002a4, 0x00000000);
-	nv_icmd(dev, 0x0002a5, 0x00000000);
-	nv_icmd(dev, 0x0002a6, 0x00000000);
-	nv_icmd(dev, 0x0002a7, 0x00000000);
-	nv_icmd(dev, 0x0002a8, 0x00000000);
-	nv_icmd(dev, 0x0002a9, 0x00000000);
-	nv_icmd(dev, 0x0002aa, 0x00000000);
-	nv_icmd(dev, 0x0002ab, 0x00000000);
-	nv_icmd(dev, 0x0002ac, 0x00000000);
-	nv_icmd(dev, 0x0002ad, 0x00000000);
-	nv_icmd(dev, 0x0002ae, 0x00000000);
-	nv_icmd(dev, 0x0002af, 0x00000000);
-	nv_icmd(dev, 0x000420, 0x00000000);
-	nv_icmd(dev, 0x000421, 0x00000000);
-	nv_icmd(dev, 0x000422, 0x00000000);
-	nv_icmd(dev, 0x000423, 0x00000000);
-	nv_icmd(dev, 0x000424, 0x00000000);
-	nv_icmd(dev, 0x000425, 0x00000000);
-	nv_icmd(dev, 0x000426, 0x00000000);
-	nv_icmd(dev, 0x000427, 0x00000000);
-	nv_icmd(dev, 0x000428, 0x00000000);
-	nv_icmd(dev, 0x000429, 0x00000000);
-	nv_icmd(dev, 0x00042a, 0x00000000);
-	nv_icmd(dev, 0x00042b, 0x00000000);
-	nv_icmd(dev, 0x00042c, 0x00000000);
-	nv_icmd(dev, 0x00042d, 0x00000000);
-	nv_icmd(dev, 0x00042e, 0x00000000);
-	nv_icmd(dev, 0x00042f, 0x00000000);
-	nv_icmd(dev, 0x0002b0, 0x00000000);
-	nv_icmd(dev, 0x0002b1, 0x00000000);
-	nv_icmd(dev, 0x0002b2, 0x00000000);
-	nv_icmd(dev, 0x0002b3, 0x00000000);
-	nv_icmd(dev, 0x0002b4, 0x00000000);
-	nv_icmd(dev, 0x0002b5, 0x00000000);
-	nv_icmd(dev, 0x0002b6, 0x00000000);
-	nv_icmd(dev, 0x0002b7, 0x00000000);
-	nv_icmd(dev, 0x0002b8, 0x00000000);
-	nv_icmd(dev, 0x0002b9, 0x00000000);
-	nv_icmd(dev, 0x0002ba, 0x00000000);
-	nv_icmd(dev, 0x0002bb, 0x00000000);
-	nv_icmd(dev, 0x0002bc, 0x00000000);
-	nv_icmd(dev, 0x0002bd, 0x00000000);
-	nv_icmd(dev, 0x0002be, 0x00000000);
-	nv_icmd(dev, 0x0002bf, 0x00000000);
-	nv_icmd(dev, 0x000430, 0x00000000);
-	nv_icmd(dev, 0x000431, 0x00000000);
-	nv_icmd(dev, 0x000432, 0x00000000);
-	nv_icmd(dev, 0x000433, 0x00000000);
-	nv_icmd(dev, 0x000434, 0x00000000);
-	nv_icmd(dev, 0x000435, 0x00000000);
-	nv_icmd(dev, 0x000436, 0x00000000);
-	nv_icmd(dev, 0x000437, 0x00000000);
-	nv_icmd(dev, 0x000438, 0x00000000);
-	nv_icmd(dev, 0x000439, 0x00000000);
-	nv_icmd(dev, 0x00043a, 0x00000000);
-	nv_icmd(dev, 0x00043b, 0x00000000);
-	nv_icmd(dev, 0x00043c, 0x00000000);
-	nv_icmd(dev, 0x00043d, 0x00000000);
-	nv_icmd(dev, 0x00043e, 0x00000000);
-	nv_icmd(dev, 0x00043f, 0x00000000);
-	nv_icmd(dev, 0x0002c0, 0x00000000);
-	nv_icmd(dev, 0x0002c1, 0x00000000);
-	nv_icmd(dev, 0x0002c2, 0x00000000);
-	nv_icmd(dev, 0x0002c3, 0x00000000);
-	nv_icmd(dev, 0x0002c4, 0x00000000);
-	nv_icmd(dev, 0x0002c5, 0x00000000);
-	nv_icmd(dev, 0x0002c6, 0x00000000);
-	nv_icmd(dev, 0x0002c7, 0x00000000);
-	nv_icmd(dev, 0x0002c8, 0x00000000);
-	nv_icmd(dev, 0x0002c9, 0x00000000);
-	nv_icmd(dev, 0x0002ca, 0x00000000);
-	nv_icmd(dev, 0x0002cb, 0x00000000);
-	nv_icmd(dev, 0x0002cc, 0x00000000);
-	nv_icmd(dev, 0x0002cd, 0x00000000);
-	nv_icmd(dev, 0x0002ce, 0x00000000);
-	nv_icmd(dev, 0x0002cf, 0x00000000);
-	nv_icmd(dev, 0x0004d0, 0x00000000);
-	nv_icmd(dev, 0x0004d1, 0x00000000);
-	nv_icmd(dev, 0x0004d2, 0x00000000);
-	nv_icmd(dev, 0x0004d3, 0x00000000);
-	nv_icmd(dev, 0x0004d4, 0x00000000);
-	nv_icmd(dev, 0x0004d5, 0x00000000);
-	nv_icmd(dev, 0x0004d6, 0x00000000);
-	nv_icmd(dev, 0x0004d7, 0x00000000);
-	nv_icmd(dev, 0x0004d8, 0x00000000);
-	nv_icmd(dev, 0x0004d9, 0x00000000);
-	nv_icmd(dev, 0x0004da, 0x00000000);
-	nv_icmd(dev, 0x0004db, 0x00000000);
-	nv_icmd(dev, 0x0004dc, 0x00000000);
-	nv_icmd(dev, 0x0004dd, 0x00000000);
-	nv_icmd(dev, 0x0004de, 0x00000000);
-	nv_icmd(dev, 0x0004df, 0x00000000);
-	nv_icmd(dev, 0x000720, 0x00000000);
-	nv_icmd(dev, 0x000721, 0x00000000);
-	nv_icmd(dev, 0x000722, 0x00000000);
-	nv_icmd(dev, 0x000723, 0x00000000);
-	nv_icmd(dev, 0x000724, 0x00000000);
-	nv_icmd(dev, 0x000725, 0x00000000);
-	nv_icmd(dev, 0x000726, 0x00000000);
-	nv_icmd(dev, 0x000727, 0x00000000);
-	nv_icmd(dev, 0x000728, 0x00000000);
-	nv_icmd(dev, 0x000729, 0x00000000);
-	nv_icmd(dev, 0x00072a, 0x00000000);
-	nv_icmd(dev, 0x00072b, 0x00000000);
-	nv_icmd(dev, 0x00072c, 0x00000000);
-	nv_icmd(dev, 0x00072d, 0x00000000);
-	nv_icmd(dev, 0x00072e, 0x00000000);
-	nv_icmd(dev, 0x00072f, 0x00000000);
-	nv_icmd(dev, 0x0008c0, 0x00000000);
-	nv_icmd(dev, 0x0008c1, 0x00000000);
-	nv_icmd(dev, 0x0008c2, 0x00000000);
-	nv_icmd(dev, 0x0008c3, 0x00000000);
-	nv_icmd(dev, 0x0008c4, 0x00000000);
-	nv_icmd(dev, 0x0008c5, 0x00000000);
-	nv_icmd(dev, 0x0008c6, 0x00000000);
-	nv_icmd(dev, 0x0008c7, 0x00000000);
-	nv_icmd(dev, 0x0008c8, 0x00000000);
-	nv_icmd(dev, 0x0008c9, 0x00000000);
-	nv_icmd(dev, 0x0008ca, 0x00000000);
-	nv_icmd(dev, 0x0008cb, 0x00000000);
-	nv_icmd(dev, 0x0008cc, 0x00000000);
-	nv_icmd(dev, 0x0008cd, 0x00000000);
-	nv_icmd(dev, 0x0008ce, 0x00000000);
-	nv_icmd(dev, 0x0008cf, 0x00000000);
-	nv_icmd(dev, 0x000890, 0x00000000);
-	nv_icmd(dev, 0x000891, 0x00000000);
-	nv_icmd(dev, 0x000892, 0x00000000);
-	nv_icmd(dev, 0x000893, 0x00000000);
-	nv_icmd(dev, 0x000894, 0x00000000);
-	nv_icmd(dev, 0x000895, 0x00000000);
-	nv_icmd(dev, 0x000896, 0x00000000);
-	nv_icmd(dev, 0x000897, 0x00000000);
-	nv_icmd(dev, 0x000898, 0x00000000);
-	nv_icmd(dev, 0x000899, 0x00000000);
-	nv_icmd(dev, 0x00089a, 0x00000000);
-	nv_icmd(dev, 0x00089b, 0x00000000);
-	nv_icmd(dev, 0x00089c, 0x00000000);
-	nv_icmd(dev, 0x00089d, 0x00000000);
-	nv_icmd(dev, 0x00089e, 0x00000000);
-	nv_icmd(dev, 0x00089f, 0x00000000);
-	nv_icmd(dev, 0x0008e0, 0x00000000);
-	nv_icmd(dev, 0x0008e1, 0x00000000);
-	nv_icmd(dev, 0x0008e2, 0x00000000);
-	nv_icmd(dev, 0x0008e3, 0x00000000);
-	nv_icmd(dev, 0x0008e4, 0x00000000);
-	nv_icmd(dev, 0x0008e5, 0x00000000);
-	nv_icmd(dev, 0x0008e6, 0x00000000);
-	nv_icmd(dev, 0x0008e7, 0x00000000);
-	nv_icmd(dev, 0x0008e8, 0x00000000);
-	nv_icmd(dev, 0x0008e9, 0x00000000);
-	nv_icmd(dev, 0x0008ea, 0x00000000);
-	nv_icmd(dev, 0x0008eb, 0x00000000);
-	nv_icmd(dev, 0x0008ec, 0x00000000);
-	nv_icmd(dev, 0x0008ed, 0x00000000);
-	nv_icmd(dev, 0x0008ee, 0x00000000);
-	nv_icmd(dev, 0x0008ef, 0x00000000);
-	nv_icmd(dev, 0x0008a0, 0x00000000);
-	nv_icmd(dev, 0x0008a1, 0x00000000);
-	nv_icmd(dev, 0x0008a2, 0x00000000);
-	nv_icmd(dev, 0x0008a3, 0x00000000);
-	nv_icmd(dev, 0x0008a4, 0x00000000);
-	nv_icmd(dev, 0x0008a5, 0x00000000);
-	nv_icmd(dev, 0x0008a6, 0x00000000);
-	nv_icmd(dev, 0x0008a7, 0x00000000);
-	nv_icmd(dev, 0x0008a8, 0x00000000);
-	nv_icmd(dev, 0x0008a9, 0x00000000);
-	nv_icmd(dev, 0x0008aa, 0x00000000);
-	nv_icmd(dev, 0x0008ab, 0x00000000);
-	nv_icmd(dev, 0x0008ac, 0x00000000);
-	nv_icmd(dev, 0x0008ad, 0x00000000);
-	nv_icmd(dev, 0x0008ae, 0x00000000);
-	nv_icmd(dev, 0x0008af, 0x00000000);
-	nv_icmd(dev, 0x0008f0, 0x00000000);
-	nv_icmd(dev, 0x0008f1, 0x00000000);
-	nv_icmd(dev, 0x0008f2, 0x00000000);
-	nv_icmd(dev, 0x0008f3, 0x00000000);
-	nv_icmd(dev, 0x0008f4, 0x00000000);
-	nv_icmd(dev, 0x0008f5, 0x00000000);
-	nv_icmd(dev, 0x0008f6, 0x00000000);
-	nv_icmd(dev, 0x0008f7, 0x00000000);
-	nv_icmd(dev, 0x0008f8, 0x00000000);
-	nv_icmd(dev, 0x0008f9, 0x00000000);
-	nv_icmd(dev, 0x0008fa, 0x00000000);
-	nv_icmd(dev, 0x0008fb, 0x00000000);
-	nv_icmd(dev, 0x0008fc, 0x00000000);
-	nv_icmd(dev, 0x0008fd, 0x00000000);
-	nv_icmd(dev, 0x0008fe, 0x00000000);
-	nv_icmd(dev, 0x0008ff, 0x00000000);
-	nv_icmd(dev, 0x00094c, 0x000000ff);
-	nv_icmd(dev, 0x00094d, 0xffffffff);
-	nv_icmd(dev, 0x00094e, 0x00000002);
-	nv_icmd(dev, 0x0002ec, 0x00000001);
-	nv_icmd(dev, 0x000303, 0x00000001);
-	nv_icmd(dev, 0x0002e6, 0x00000001);
-	nv_icmd(dev, 0x000466, 0x00000052);
-	nv_icmd(dev, 0x000301, 0x3f800000);
-	nv_icmd(dev, 0x000304, 0x30201000);
-	nv_icmd(dev, 0x000305, 0x70605040);
-	nv_icmd(dev, 0x000306, 0xb8a89888);
-	nv_icmd(dev, 0x000307, 0xf8e8d8c8);
-	nv_icmd(dev, 0x00030a, 0x00ffff00);
-	nv_icmd(dev, 0x00030b, 0x0000001a);
-	nv_icmd(dev, 0x00030c, 0x00000001);
-	nv_icmd(dev, 0x000318, 0x00000001);
-	nv_icmd(dev, 0x000340, 0x00000000);
-	nv_icmd(dev, 0x000375, 0x00000001);
-	nv_icmd(dev, 0x00037d, 0x00000006);
-	nv_icmd(dev, 0x0003a0, 0x00000002);
-	nv_icmd(dev, 0x0003aa, 0x00000001);
-	nv_icmd(dev, 0x0003a9, 0x00000001);
-	nv_icmd(dev, 0x000380, 0x00000001);
-	nv_icmd(dev, 0x000383, 0x00000011);
-	nv_icmd(dev, 0x000360, 0x00000040);
-	nv_icmd(dev, 0x000366, 0x00000000);
-	nv_icmd(dev, 0x000367, 0x00000000);
-	nv_icmd(dev, 0x000368, 0x00000fff);
-	nv_icmd(dev, 0x000370, 0x00000000);
-	nv_icmd(dev, 0x000371, 0x00000000);
-	nv_icmd(dev, 0x000372, 0x000fffff);
-	nv_icmd(dev, 0x00037a, 0x00000012);
-	nv_icmd(dev, 0x000619, 0x00000003);
-	nv_icmd(dev, 0x000811, 0x00000003);
-	nv_icmd(dev, 0x000812, 0x00000004);
-	nv_icmd(dev, 0x000813, 0x00000006);
-	nv_icmd(dev, 0x000814, 0x00000008);
-	nv_icmd(dev, 0x000815, 0x0000000b);
-	nv_icmd(dev, 0x000800, 0x00000001);
-	nv_icmd(dev, 0x000801, 0x00000001);
-	nv_icmd(dev, 0x000802, 0x00000001);
-	nv_icmd(dev, 0x000803, 0x00000001);
-	nv_icmd(dev, 0x000804, 0x00000001);
-	nv_icmd(dev, 0x000805, 0x00000001);
-	nv_icmd(dev, 0x000632, 0x00000001);
-	nv_icmd(dev, 0x000633, 0x00000002);
-	nv_icmd(dev, 0x000634, 0x00000003);
-	nv_icmd(dev, 0x000635, 0x00000004);
-	nv_icmd(dev, 0x000654, 0x3f800000);
-	nv_icmd(dev, 0x000657, 0x3f800000);
-	nv_icmd(dev, 0x000655, 0x3f800000);
-	nv_icmd(dev, 0x000656, 0x3f800000);
-	nv_icmd(dev, 0x0006cd, 0x3f800000);
-	nv_icmd(dev, 0x0007f5, 0x3f800000);
-	nv_icmd(dev, 0x0007dc, 0x39291909);
-	nv_icmd(dev, 0x0007dd, 0x79695949);
-	nv_icmd(dev, 0x0007de, 0xb9a99989);
-	nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
-	nv_icmd(dev, 0x0007e8, 0x00003210);
-	nv_icmd(dev, 0x0007e9, 0x00007654);
-	nv_icmd(dev, 0x0007ea, 0x00000098);
-	nv_icmd(dev, 0x0007ec, 0x39291909);
-	nv_icmd(dev, 0x0007ed, 0x79695949);
-	nv_icmd(dev, 0x0007ee, 0xb9a99989);
-	nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
-	nv_icmd(dev, 0x0007f0, 0x00003210);
-	nv_icmd(dev, 0x0007f1, 0x00007654);
-	nv_icmd(dev, 0x0007f2, 0x00000098);
-	nv_icmd(dev, 0x0005a5, 0x00000001);
-	nv_icmd(dev, 0x000980, 0x00000000);
-	nv_icmd(dev, 0x000981, 0x00000000);
-	nv_icmd(dev, 0x000982, 0x00000000);
-	nv_icmd(dev, 0x000983, 0x00000000);
-	nv_icmd(dev, 0x000984, 0x00000000);
-	nv_icmd(dev, 0x000985, 0x00000000);
-	nv_icmd(dev, 0x000986, 0x00000000);
-	nv_icmd(dev, 0x000987, 0x00000000);
-	nv_icmd(dev, 0x000988, 0x00000000);
-	nv_icmd(dev, 0x000989, 0x00000000);
-	nv_icmd(dev, 0x00098a, 0x00000000);
-	nv_icmd(dev, 0x00098b, 0x00000000);
-	nv_icmd(dev, 0x00098c, 0x00000000);
-	nv_icmd(dev, 0x00098d, 0x00000000);
-	nv_icmd(dev, 0x00098e, 0x00000000);
-	nv_icmd(dev, 0x00098f, 0x00000000);
-	nv_icmd(dev, 0x000990, 0x00000000);
-	nv_icmd(dev, 0x000991, 0x00000000);
-	nv_icmd(dev, 0x000992, 0x00000000);
-	nv_icmd(dev, 0x000993, 0x00000000);
-	nv_icmd(dev, 0x000994, 0x00000000);
-	nv_icmd(dev, 0x000995, 0x00000000);
-	nv_icmd(dev, 0x000996, 0x00000000);
-	nv_icmd(dev, 0x000997, 0x00000000);
-	nv_icmd(dev, 0x000998, 0x00000000);
-	nv_icmd(dev, 0x000999, 0x00000000);
-	nv_icmd(dev, 0x00099a, 0x00000000);
-	nv_icmd(dev, 0x00099b, 0x00000000);
-	nv_icmd(dev, 0x00099c, 0x00000000);
-	nv_icmd(dev, 0x00099d, 0x00000000);
-	nv_icmd(dev, 0x00099e, 0x00000000);
-	nv_icmd(dev, 0x00099f, 0x00000000);
-	nv_icmd(dev, 0x0009a0, 0x00000000);
-	nv_icmd(dev, 0x0009a1, 0x00000000);
-	nv_icmd(dev, 0x0009a2, 0x00000000);
-	nv_icmd(dev, 0x0009a3, 0x00000000);
-	nv_icmd(dev, 0x0009a4, 0x00000000);
-	nv_icmd(dev, 0x0009a5, 0x00000000);
-	nv_icmd(dev, 0x0009a6, 0x00000000);
-	nv_icmd(dev, 0x0009a7, 0x00000000);
-	nv_icmd(dev, 0x0009a8, 0x00000000);
-	nv_icmd(dev, 0x0009a9, 0x00000000);
-	nv_icmd(dev, 0x0009aa, 0x00000000);
-	nv_icmd(dev, 0x0009ab, 0x00000000);
-	nv_icmd(dev, 0x0009ac, 0x00000000);
-	nv_icmd(dev, 0x0009ad, 0x00000000);
-	nv_icmd(dev, 0x0009ae, 0x00000000);
-	nv_icmd(dev, 0x0009af, 0x00000000);
-	nv_icmd(dev, 0x0009b0, 0x00000000);
-	nv_icmd(dev, 0x0009b1, 0x00000000);
-	nv_icmd(dev, 0x0009b2, 0x00000000);
-	nv_icmd(dev, 0x0009b3, 0x00000000);
-	nv_icmd(dev, 0x0009b4, 0x00000000);
-	nv_icmd(dev, 0x0009b5, 0x00000000);
-	nv_icmd(dev, 0x0009b6, 0x00000000);
-	nv_icmd(dev, 0x0009b7, 0x00000000);
-	nv_icmd(dev, 0x0009b8, 0x00000000);
-	nv_icmd(dev, 0x0009b9, 0x00000000);
-	nv_icmd(dev, 0x0009ba, 0x00000000);
-	nv_icmd(dev, 0x0009bb, 0x00000000);
-	nv_icmd(dev, 0x0009bc, 0x00000000);
-	nv_icmd(dev, 0x0009bd, 0x00000000);
-	nv_icmd(dev, 0x0009be, 0x00000000);
-	nv_icmd(dev, 0x0009bf, 0x00000000);
-	nv_icmd(dev, 0x0009c0, 0x00000000);
-	nv_icmd(dev, 0x0009c1, 0x00000000);
-	nv_icmd(dev, 0x0009c2, 0x00000000);
-	nv_icmd(dev, 0x0009c3, 0x00000000);
-	nv_icmd(dev, 0x0009c4, 0x00000000);
-	nv_icmd(dev, 0x0009c5, 0x00000000);
-	nv_icmd(dev, 0x0009c6, 0x00000000);
-	nv_icmd(dev, 0x0009c7, 0x00000000);
-	nv_icmd(dev, 0x0009c8, 0x00000000);
-	nv_icmd(dev, 0x0009c9, 0x00000000);
-	nv_icmd(dev, 0x0009ca, 0x00000000);
-	nv_icmd(dev, 0x0009cb, 0x00000000);
-	nv_icmd(dev, 0x0009cc, 0x00000000);
-	nv_icmd(dev, 0x0009cd, 0x00000000);
-	nv_icmd(dev, 0x0009ce, 0x00000000);
-	nv_icmd(dev, 0x0009cf, 0x00000000);
-	nv_icmd(dev, 0x0009d0, 0x00000000);
-	nv_icmd(dev, 0x0009d1, 0x00000000);
-	nv_icmd(dev, 0x0009d2, 0x00000000);
-	nv_icmd(dev, 0x0009d3, 0x00000000);
-	nv_icmd(dev, 0x0009d4, 0x00000000);
-	nv_icmd(dev, 0x0009d5, 0x00000000);
-	nv_icmd(dev, 0x0009d6, 0x00000000);
-	nv_icmd(dev, 0x0009d7, 0x00000000);
-	nv_icmd(dev, 0x0009d8, 0x00000000);
-	nv_icmd(dev, 0x0009d9, 0x00000000);
-	nv_icmd(dev, 0x0009da, 0x00000000);
-	nv_icmd(dev, 0x0009db, 0x00000000);
-	nv_icmd(dev, 0x0009dc, 0x00000000);
-	nv_icmd(dev, 0x0009dd, 0x00000000);
-	nv_icmd(dev, 0x0009de, 0x00000000);
-	nv_icmd(dev, 0x0009df, 0x00000000);
-	nv_icmd(dev, 0x0009e0, 0x00000000);
-	nv_icmd(dev, 0x0009e1, 0x00000000);
-	nv_icmd(dev, 0x0009e2, 0x00000000);
-	nv_icmd(dev, 0x0009e3, 0x00000000);
-	nv_icmd(dev, 0x0009e4, 0x00000000);
-	nv_icmd(dev, 0x0009e5, 0x00000000);
-	nv_icmd(dev, 0x0009e6, 0x00000000);
-	nv_icmd(dev, 0x0009e7, 0x00000000);
-	nv_icmd(dev, 0x0009e8, 0x00000000);
-	nv_icmd(dev, 0x0009e9, 0x00000000);
-	nv_icmd(dev, 0x0009ea, 0x00000000);
-	nv_icmd(dev, 0x0009eb, 0x00000000);
-	nv_icmd(dev, 0x0009ec, 0x00000000);
-	nv_icmd(dev, 0x0009ed, 0x00000000);
-	nv_icmd(dev, 0x0009ee, 0x00000000);
-	nv_icmd(dev, 0x0009ef, 0x00000000);
-	nv_icmd(dev, 0x0009f0, 0x00000000);
-	nv_icmd(dev, 0x0009f1, 0x00000000);
-	nv_icmd(dev, 0x0009f2, 0x00000000);
-	nv_icmd(dev, 0x0009f3, 0x00000000);
-	nv_icmd(dev, 0x0009f4, 0x00000000);
-	nv_icmd(dev, 0x0009f5, 0x00000000);
-	nv_icmd(dev, 0x0009f6, 0x00000000);
-	nv_icmd(dev, 0x0009f7, 0x00000000);
-	nv_icmd(dev, 0x0009f8, 0x00000000);
-	nv_icmd(dev, 0x0009f9, 0x00000000);
-	nv_icmd(dev, 0x0009fa, 0x00000000);
-	nv_icmd(dev, 0x0009fb, 0x00000000);
-	nv_icmd(dev, 0x0009fc, 0x00000000);
-	nv_icmd(dev, 0x0009fd, 0x00000000);
-	nv_icmd(dev, 0x0009fe, 0x00000000);
-	nv_icmd(dev, 0x0009ff, 0x00000000);
-	nv_icmd(dev, 0x000468, 0x00000004);
-	nv_icmd(dev, 0x00046c, 0x00000001);
-	nv_icmd(dev, 0x000470, 0x00000000);
-	nv_icmd(dev, 0x000471, 0x00000000);
-	nv_icmd(dev, 0x000472, 0x00000000);
-	nv_icmd(dev, 0x000473, 0x00000000);
-	nv_icmd(dev, 0x000474, 0x00000000);
-	nv_icmd(dev, 0x000475, 0x00000000);
-	nv_icmd(dev, 0x000476, 0x00000000);
-	nv_icmd(dev, 0x000477, 0x00000000);
-	nv_icmd(dev, 0x000478, 0x00000000);
-	nv_icmd(dev, 0x000479, 0x00000000);
-	nv_icmd(dev, 0x00047a, 0x00000000);
-	nv_icmd(dev, 0x00047b, 0x00000000);
-	nv_icmd(dev, 0x00047c, 0x00000000);
-	nv_icmd(dev, 0x00047d, 0x00000000);
-	nv_icmd(dev, 0x00047e, 0x00000000);
-	nv_icmd(dev, 0x00047f, 0x00000000);
-	nv_icmd(dev, 0x000480, 0x00000000);
-	nv_icmd(dev, 0x000481, 0x00000000);
-	nv_icmd(dev, 0x000482, 0x00000000);
-	nv_icmd(dev, 0x000483, 0x00000000);
-	nv_icmd(dev, 0x000484, 0x00000000);
-	nv_icmd(dev, 0x000485, 0x00000000);
-	nv_icmd(dev, 0x000486, 0x00000000);
-	nv_icmd(dev, 0x000487, 0x00000000);
-	nv_icmd(dev, 0x000488, 0x00000000);
-	nv_icmd(dev, 0x000489, 0x00000000);
-	nv_icmd(dev, 0x00048a, 0x00000000);
-	nv_icmd(dev, 0x00048b, 0x00000000);
-	nv_icmd(dev, 0x00048c, 0x00000000);
-	nv_icmd(dev, 0x00048d, 0x00000000);
-	nv_icmd(dev, 0x00048e, 0x00000000);
-	nv_icmd(dev, 0x00048f, 0x00000000);
-	nv_icmd(dev, 0x000490, 0x00000000);
-	nv_icmd(dev, 0x000491, 0x00000000);
-	nv_icmd(dev, 0x000492, 0x00000000);
-	nv_icmd(dev, 0x000493, 0x00000000);
-	nv_icmd(dev, 0x000494, 0x00000000);
-	nv_icmd(dev, 0x000495, 0x00000000);
-	nv_icmd(dev, 0x000496, 0x00000000);
-	nv_icmd(dev, 0x000497, 0x00000000);
-	nv_icmd(dev, 0x000498, 0x00000000);
-	nv_icmd(dev, 0x000499, 0x00000000);
-	nv_icmd(dev, 0x00049a, 0x00000000);
-	nv_icmd(dev, 0x00049b, 0x00000000);
-	nv_icmd(dev, 0x00049c, 0x00000000);
-	nv_icmd(dev, 0x00049d, 0x00000000);
-	nv_icmd(dev, 0x00049e, 0x00000000);
-	nv_icmd(dev, 0x00049f, 0x00000000);
-	nv_icmd(dev, 0x0004a0, 0x00000000);
-	nv_icmd(dev, 0x0004a1, 0x00000000);
-	nv_icmd(dev, 0x0004a2, 0x00000000);
-	nv_icmd(dev, 0x0004a3, 0x00000000);
-	nv_icmd(dev, 0x0004a4, 0x00000000);
-	nv_icmd(dev, 0x0004a5, 0x00000000);
-	nv_icmd(dev, 0x0004a6, 0x00000000);
-	nv_icmd(dev, 0x0004a7, 0x00000000);
-	nv_icmd(dev, 0x0004a8, 0x00000000);
-	nv_icmd(dev, 0x0004a9, 0x00000000);
-	nv_icmd(dev, 0x0004aa, 0x00000000);
-	nv_icmd(dev, 0x0004ab, 0x00000000);
-	nv_icmd(dev, 0x0004ac, 0x00000000);
-	nv_icmd(dev, 0x0004ad, 0x00000000);
-	nv_icmd(dev, 0x0004ae, 0x00000000);
-	nv_icmd(dev, 0x0004af, 0x00000000);
-	nv_icmd(dev, 0x0004b0, 0x00000000);
-	nv_icmd(dev, 0x0004b1, 0x00000000);
-	nv_icmd(dev, 0x0004b2, 0x00000000);
-	nv_icmd(dev, 0x0004b3, 0x00000000);
-	nv_icmd(dev, 0x0004b4, 0x00000000);
-	nv_icmd(dev, 0x0004b5, 0x00000000);
-	nv_icmd(dev, 0x0004b6, 0x00000000);
-	nv_icmd(dev, 0x0004b7, 0x00000000);
-	nv_icmd(dev, 0x0004b8, 0x00000000);
-	nv_icmd(dev, 0x0004b9, 0x00000000);
-	nv_icmd(dev, 0x0004ba, 0x00000000);
-	nv_icmd(dev, 0x0004bb, 0x00000000);
-	nv_icmd(dev, 0x0004bc, 0x00000000);
-	nv_icmd(dev, 0x0004bd, 0x00000000);
-	nv_icmd(dev, 0x0004be, 0x00000000);
-	nv_icmd(dev, 0x0004bf, 0x00000000);
-	nv_icmd(dev, 0x0004c0, 0x00000000);
-	nv_icmd(dev, 0x0004c1, 0x00000000);
-	nv_icmd(dev, 0x0004c2, 0x00000000);
-	nv_icmd(dev, 0x0004c3, 0x00000000);
-	nv_icmd(dev, 0x0004c4, 0x00000000);
-	nv_icmd(dev, 0x0004c5, 0x00000000);
-	nv_icmd(dev, 0x0004c6, 0x00000000);
-	nv_icmd(dev, 0x0004c7, 0x00000000);
-	nv_icmd(dev, 0x0004c8, 0x00000000);
-	nv_icmd(dev, 0x0004c9, 0x00000000);
-	nv_icmd(dev, 0x0004ca, 0x00000000);
-	nv_icmd(dev, 0x0004cb, 0x00000000);
-	nv_icmd(dev, 0x0004cc, 0x00000000);
-	nv_icmd(dev, 0x0004cd, 0x00000000);
-	nv_icmd(dev, 0x0004ce, 0x00000000);
-	nv_icmd(dev, 0x0004cf, 0x00000000);
-	nv_icmd(dev, 0x000510, 0x3f800000);
-	nv_icmd(dev, 0x000511, 0x3f800000);
-	nv_icmd(dev, 0x000512, 0x3f800000);
-	nv_icmd(dev, 0x000513, 0x3f800000);
-	nv_icmd(dev, 0x000514, 0x3f800000);
-	nv_icmd(dev, 0x000515, 0x3f800000);
-	nv_icmd(dev, 0x000516, 0x3f800000);
-	nv_icmd(dev, 0x000517, 0x3f800000);
-	nv_icmd(dev, 0x000518, 0x3f800000);
-	nv_icmd(dev, 0x000519, 0x3f800000);
-	nv_icmd(dev, 0x00051a, 0x3f800000);
-	nv_icmd(dev, 0x00051b, 0x3f800000);
-	nv_icmd(dev, 0x00051c, 0x3f800000);
-	nv_icmd(dev, 0x00051d, 0x3f800000);
-	nv_icmd(dev, 0x00051e, 0x3f800000);
-	nv_icmd(dev, 0x00051f, 0x3f800000);
-	nv_icmd(dev, 0x000520, 0x000002b6);
-	nv_icmd(dev, 0x000529, 0x00000001);
-	nv_icmd(dev, 0x000530, 0xffff0000);
-	nv_icmd(dev, 0x000531, 0xffff0000);
-	nv_icmd(dev, 0x000532, 0xffff0000);
-	nv_icmd(dev, 0x000533, 0xffff0000);
-	nv_icmd(dev, 0x000534, 0xffff0000);
-	nv_icmd(dev, 0x000535, 0xffff0000);
-	nv_icmd(dev, 0x000536, 0xffff0000);
-	nv_icmd(dev, 0x000537, 0xffff0000);
-	nv_icmd(dev, 0x000538, 0xffff0000);
-	nv_icmd(dev, 0x000539, 0xffff0000);
-	nv_icmd(dev, 0x00053a, 0xffff0000);
-	nv_icmd(dev, 0x00053b, 0xffff0000);
-	nv_icmd(dev, 0x00053c, 0xffff0000);
-	nv_icmd(dev, 0x00053d, 0xffff0000);
-	nv_icmd(dev, 0x00053e, 0xffff0000);
-	nv_icmd(dev, 0x00053f, 0xffff0000);
-	nv_icmd(dev, 0x000585, 0x0000003f);
-	nv_icmd(dev, 0x000576, 0x00000003);
-	nv_icmd(dev, 0x00057b, 0x00000059);
-	nv_icmd(dev, 0x000586, 0x00000040);
-	nv_icmd(dev, 0x000582, 0x00000080);
-	nv_icmd(dev, 0x000583, 0x00000080);
-	nv_icmd(dev, 0x0005c2, 0x00000001);
-	nv_icmd(dev, 0x000638, 0x00000001);
-	nv_icmd(dev, 0x000639, 0x00000001);
-	nv_icmd(dev, 0x00063a, 0x00000002);
-	nv_icmd(dev, 0x00063b, 0x00000001);
-	nv_icmd(dev, 0x00063c, 0x00000001);
-	nv_icmd(dev, 0x00063d, 0x00000002);
-	nv_icmd(dev, 0x00063e, 0x00000001);
-	nv_icmd(dev, 0x0008b8, 0x00000001);
-	nv_icmd(dev, 0x0008b9, 0x00000001);
-	nv_icmd(dev, 0x0008ba, 0x00000001);
-	nv_icmd(dev, 0x0008bb, 0x00000001);
-	nv_icmd(dev, 0x0008bc, 0x00000001);
-	nv_icmd(dev, 0x0008bd, 0x00000001);
-	nv_icmd(dev, 0x0008be, 0x00000001);
-	nv_icmd(dev, 0x0008bf, 0x00000001);
-	nv_icmd(dev, 0x000900, 0x00000001);
-	nv_icmd(dev, 0x000901, 0x00000001);
-	nv_icmd(dev, 0x000902, 0x00000001);
-	nv_icmd(dev, 0x000903, 0x00000001);
-	nv_icmd(dev, 0x000904, 0x00000001);
-	nv_icmd(dev, 0x000905, 0x00000001);
-	nv_icmd(dev, 0x000906, 0x00000001);
-	nv_icmd(dev, 0x000907, 0x00000001);
-	nv_icmd(dev, 0x000908, 0x00000002);
-	nv_icmd(dev, 0x000909, 0x00000002);
-	nv_icmd(dev, 0x00090a, 0x00000002);
-	nv_icmd(dev, 0x00090b, 0x00000002);
-	nv_icmd(dev, 0x00090c, 0x00000002);
-	nv_icmd(dev, 0x00090d, 0x00000002);
-	nv_icmd(dev, 0x00090e, 0x00000002);
-	nv_icmd(dev, 0x00090f, 0x00000002);
-	nv_icmd(dev, 0x000910, 0x00000001);
-	nv_icmd(dev, 0x000911, 0x00000001);
-	nv_icmd(dev, 0x000912, 0x00000001);
-	nv_icmd(dev, 0x000913, 0x00000001);
-	nv_icmd(dev, 0x000914, 0x00000001);
-	nv_icmd(dev, 0x000915, 0x00000001);
-	nv_icmd(dev, 0x000916, 0x00000001);
-	nv_icmd(dev, 0x000917, 0x00000001);
-	nv_icmd(dev, 0x000918, 0x00000001);
-	nv_icmd(dev, 0x000919, 0x00000001);
-	nv_icmd(dev, 0x00091a, 0x00000001);
-	nv_icmd(dev, 0x00091b, 0x00000001);
-	nv_icmd(dev, 0x00091c, 0x00000001);
-	nv_icmd(dev, 0x00091d, 0x00000001);
-	nv_icmd(dev, 0x00091e, 0x00000001);
-	nv_icmd(dev, 0x00091f, 0x00000001);
-	nv_icmd(dev, 0x000920, 0x00000002);
-	nv_icmd(dev, 0x000921, 0x00000002);
-	nv_icmd(dev, 0x000922, 0x00000002);
-	nv_icmd(dev, 0x000923, 0x00000002);
-	nv_icmd(dev, 0x000924, 0x00000002);
-	nv_icmd(dev, 0x000925, 0x00000002);
-	nv_icmd(dev, 0x000926, 0x00000002);
-	nv_icmd(dev, 0x000927, 0x00000002);
-	nv_icmd(dev, 0x000928, 0x00000001);
-	nv_icmd(dev, 0x000929, 0x00000001);
-	nv_icmd(dev, 0x00092a, 0x00000001);
-	nv_icmd(dev, 0x00092b, 0x00000001);
-	nv_icmd(dev, 0x00092c, 0x00000001);
-	nv_icmd(dev, 0x00092d, 0x00000001);
-	nv_icmd(dev, 0x00092e, 0x00000001);
-	nv_icmd(dev, 0x00092f, 0x00000001);
-	nv_icmd(dev, 0x000648, 0x00000001);
-	nv_icmd(dev, 0x000649, 0x00000001);
-	nv_icmd(dev, 0x00064a, 0x00000001);
-	nv_icmd(dev, 0x00064b, 0x00000001);
-	nv_icmd(dev, 0x00064c, 0x00000001);
-	nv_icmd(dev, 0x00064d, 0x00000001);
-	nv_icmd(dev, 0x00064e, 0x00000001);
-	nv_icmd(dev, 0x00064f, 0x00000001);
-	nv_icmd(dev, 0x000650, 0x00000001);
-	nv_icmd(dev, 0x000658, 0x0000000f);
-	nv_icmd(dev, 0x0007ff, 0x0000000a);
-	nv_icmd(dev, 0x00066a, 0x40000000);
-	nv_icmd(dev, 0x00066b, 0x10000000);
-	nv_icmd(dev, 0x00066c, 0xffff0000);
-	nv_icmd(dev, 0x00066d, 0xffff0000);
-	nv_icmd(dev, 0x0007af, 0x00000008);
-	nv_icmd(dev, 0x0007b0, 0x00000008);
-	nv_icmd(dev, 0x0007f6, 0x00000001);
-	nv_icmd(dev, 0x0006b2, 0x00000055);
-	nv_icmd(dev, 0x0007ad, 0x00000003);
-	nv_icmd(dev, 0x000937, 0x00000001);
-	nv_icmd(dev, 0x000971, 0x00000008);
-	nv_icmd(dev, 0x000972, 0x00000040);
-	nv_icmd(dev, 0x000973, 0x0000012c);
-	nv_icmd(dev, 0x00097c, 0x00000040);
-	nv_icmd(dev, 0x000979, 0x00000003);
-	nv_icmd(dev, 0x000975, 0x00000020);
-	nv_icmd(dev, 0x000976, 0x00000001);
-	nv_icmd(dev, 0x000977, 0x00000020);
-	nv_icmd(dev, 0x000978, 0x00000001);
-	nv_icmd(dev, 0x000957, 0x00000003);
-	nv_icmd(dev, 0x00095e, 0x20164010);
-	nv_icmd(dev, 0x00095f, 0x00000020);
-	nv_icmd(dev, 0x00097d, 0x00000020);
-	nv_icmd(dev, 0x000683, 0x00000006);
-	nv_icmd(dev, 0x000685, 0x003fffff);
-	nv_icmd(dev, 0x000687, 0x003fffff);
-	nv_icmd(dev, 0x0006a0, 0x00000005);
-	nv_icmd(dev, 0x000840, 0x00400008);
-	nv_icmd(dev, 0x000841, 0x08000080);
-	nv_icmd(dev, 0x000842, 0x00400008);
-	nv_icmd(dev, 0x000843, 0x08000080);
-	nv_icmd(dev, 0x000818, 0x00000000);
-	nv_icmd(dev, 0x000819, 0x00000000);
-	nv_icmd(dev, 0x00081a, 0x00000000);
-	nv_icmd(dev, 0x00081b, 0x00000000);
-	nv_icmd(dev, 0x00081c, 0x00000000);
-	nv_icmd(dev, 0x00081d, 0x00000000);
-	nv_icmd(dev, 0x00081e, 0x00000000);
-	nv_icmd(dev, 0x00081f, 0x00000000);
-	nv_icmd(dev, 0x000848, 0x00000000);
-	nv_icmd(dev, 0x000849, 0x00000000);
-	nv_icmd(dev, 0x00084a, 0x00000000);
-	nv_icmd(dev, 0x00084b, 0x00000000);
-	nv_icmd(dev, 0x00084c, 0x00000000);
-	nv_icmd(dev, 0x00084d, 0x00000000);
-	nv_icmd(dev, 0x00084e, 0x00000000);
-	nv_icmd(dev, 0x00084f, 0x00000000);
-	nv_icmd(dev, 0x000850, 0x00000000);
-	nv_icmd(dev, 0x000851, 0x00000000);
-	nv_icmd(dev, 0x000852, 0x00000000);
-	nv_icmd(dev, 0x000853, 0x00000000);
-	nv_icmd(dev, 0x000854, 0x00000000);
-	nv_icmd(dev, 0x000855, 0x00000000);
-	nv_icmd(dev, 0x000856, 0x00000000);
-	nv_icmd(dev, 0x000857, 0x00000000);
-	nv_icmd(dev, 0x000738, 0x00000000);
-	nv_icmd(dev, 0x0006aa, 0x00000001);
-	nv_icmd(dev, 0x0006ab, 0x00000002);
-	nv_icmd(dev, 0x0006ac, 0x00000080);
-	nv_icmd(dev, 0x0006ad, 0x00000100);
-	nv_icmd(dev, 0x0006ae, 0x00000100);
-	nv_icmd(dev, 0x0006b1, 0x00000011);
-	nv_icmd(dev, 0x0006bb, 0x000000cf);
-	nv_icmd(dev, 0x0006ce, 0x2a712488);
-	nv_icmd(dev, 0x000739, 0x4085c000);
-	nv_icmd(dev, 0x00073a, 0x00000080);
-	nv_icmd(dev, 0x000786, 0x80000100);
-	nv_icmd(dev, 0x00073c, 0x00010100);
-	nv_icmd(dev, 0x00073d, 0x02800000);
-	nv_icmd(dev, 0x000787, 0x000000cf);
-	nv_icmd(dev, 0x00078c, 0x00000008);
-	nv_icmd(dev, 0x000792, 0x00000001);
-	nv_icmd(dev, 0x000794, 0x00000001);
-	nv_icmd(dev, 0x000795, 0x00000001);
-	nv_icmd(dev, 0x000796, 0x00000001);
-	nv_icmd(dev, 0x000797, 0x000000cf);
-	nv_icmd(dev, 0x000836, 0x00000001);
-	nv_icmd(dev, 0x00079a, 0x00000002);
-	nv_icmd(dev, 0x000833, 0x04444480);
-	nv_icmd(dev, 0x0007a1, 0x00000001);
-	nv_icmd(dev, 0x0007a3, 0x00000001);
-	nv_icmd(dev, 0x0007a4, 0x00000001);
-	nv_icmd(dev, 0x0007a5, 0x00000001);
-	nv_icmd(dev, 0x000831, 0x00000004);
-	nv_icmd(dev, 0x000b07, 0x00000002);
-	nv_icmd(dev, 0x000b08, 0x00000100);
-	nv_icmd(dev, 0x000b09, 0x00000100);
-	nv_icmd(dev, 0x000b0a, 0x00000001);
-	nv_icmd(dev, 0x000a04, 0x000000ff);
-	nv_icmd(dev, 0x000a0b, 0x00000040);
-	nv_icmd(dev, 0x00097f, 0x00000100);
-	nv_icmd(dev, 0x000a02, 0x00000001);
-	nv_icmd(dev, 0x000809, 0x00000007);
-	nv_icmd(dev, 0x00c221, 0x00000040);
-	nv_icmd(dev, 0x00c1b0, 0x0000000f);
-	nv_icmd(dev, 0x00c1b1, 0x0000000f);
-	nv_icmd(dev, 0x00c1b2, 0x0000000f);
-	nv_icmd(dev, 0x00c1b3, 0x0000000f);
-	nv_icmd(dev, 0x00c1b4, 0x0000000f);
-	nv_icmd(dev, 0x00c1b5, 0x0000000f);
-	nv_icmd(dev, 0x00c1b6, 0x0000000f);
-	nv_icmd(dev, 0x00c1b7, 0x0000000f);
-	nv_icmd(dev, 0x00c1b8, 0x0fac6881);
-	nv_icmd(dev, 0x00c1b9, 0x00fac688);
-	nv_icmd(dev, 0x00c401, 0x00000001);
-	nv_icmd(dev, 0x00c402, 0x00010001);
-	nv_icmd(dev, 0x00c403, 0x00000001);
-	nv_icmd(dev, 0x00c404, 0x00000001);
-	nv_icmd(dev, 0x00c40e, 0x00000020);
-	nv_icmd(dev, 0x00c500, 0x00000003);
-	nv_icmd(dev, 0x01e100, 0x00000001);
-	nv_icmd(dev, 0x001000, 0x00000002);
-	nv_icmd(dev, 0x0006aa, 0x00000001);
-	nv_icmd(dev, 0x0006ad, 0x00000100);
-	nv_icmd(dev, 0x0006ae, 0x00000100);
-	nv_icmd(dev, 0x0006b1, 0x00000011);
-	nv_icmd(dev, 0x00078c, 0x00000008);
-	nv_icmd(dev, 0x000792, 0x00000001);
-	nv_icmd(dev, 0x000794, 0x00000001);
-	nv_icmd(dev, 0x000795, 0x00000001);
-	nv_icmd(dev, 0x000796, 0x00000001);
-	nv_icmd(dev, 0x000797, 0x000000cf);
-	nv_icmd(dev, 0x00079a, 0x00000002);
-	nv_icmd(dev, 0x000833, 0x04444480);
-	nv_icmd(dev, 0x0007a1, 0x00000001);
-	nv_icmd(dev, 0x0007a3, 0x00000001);
-	nv_icmd(dev, 0x0007a4, 0x00000001);
-	nv_icmd(dev, 0x0007a5, 0x00000001);
-	nv_icmd(dev, 0x000831, 0x00000004);
-	nv_icmd(dev, 0x01e100, 0x00000001);
-	nv_icmd(dev, 0x001000, 0x00000008);
-	nv_icmd(dev, 0x000039, 0x00000000);
-	nv_icmd(dev, 0x00003a, 0x00000000);
-	nv_icmd(dev, 0x00003b, 0x00000000);
-	nv_icmd(dev, 0x000380, 0x00000001);
-	nv_icmd(dev, 0x000366, 0x00000000);
-	nv_icmd(dev, 0x000367, 0x00000000);
-	nv_icmd(dev, 0x000368, 0x00000fff);
-	nv_icmd(dev, 0x000370, 0x00000000);
-	nv_icmd(dev, 0x000371, 0x00000000);
-	nv_icmd(dev, 0x000372, 0x000fffff);
-	nv_icmd(dev, 0x000813, 0x00000006);
-	nv_icmd(dev, 0x000814, 0x00000008);
-	nv_icmd(dev, 0x000957, 0x00000003);
-	nv_icmd(dev, 0x000818, 0x00000000);
-	nv_icmd(dev, 0x000819, 0x00000000);
-	nv_icmd(dev, 0x00081a, 0x00000000);
-	nv_icmd(dev, 0x00081b, 0x00000000);
-	nv_icmd(dev, 0x00081c, 0x00000000);
-	nv_icmd(dev, 0x00081d, 0x00000000);
-	nv_icmd(dev, 0x00081e, 0x00000000);
-	nv_icmd(dev, 0x00081f, 0x00000000);
-	nv_icmd(dev, 0x000848, 0x00000000);
-	nv_icmd(dev, 0x000849, 0x00000000);
-	nv_icmd(dev, 0x00084a, 0x00000000);
-	nv_icmd(dev, 0x00084b, 0x00000000);
-	nv_icmd(dev, 0x00084c, 0x00000000);
-	nv_icmd(dev, 0x00084d, 0x00000000);
-	nv_icmd(dev, 0x00084e, 0x00000000);
-	nv_icmd(dev, 0x00084f, 0x00000000);
-	nv_icmd(dev, 0x000850, 0x00000000);
-	nv_icmd(dev, 0x000851, 0x00000000);
-	nv_icmd(dev, 0x000852, 0x00000000);
-	nv_icmd(dev, 0x000853, 0x00000000);
-	nv_icmd(dev, 0x000854, 0x00000000);
-	nv_icmd(dev, 0x000855, 0x00000000);
-	nv_icmd(dev, 0x000856, 0x00000000);
-	nv_icmd(dev, 0x000857, 0x00000000);
-	nv_icmd(dev, 0x000738, 0x00000000);
-	nv_icmd(dev, 0x000b07, 0x00000002);
-	nv_icmd(dev, 0x000b08, 0x00000100);
-	nv_icmd(dev, 0x000b09, 0x00000100);
-	nv_icmd(dev, 0x000b0a, 0x00000001);
-	nv_icmd(dev, 0x000a04, 0x000000ff);
-	nv_icmd(dev, 0x00097f, 0x00000100);
-	nv_icmd(dev, 0x000a02, 0x00000001);
-	nv_icmd(dev, 0x000809, 0x00000007);
-	nv_icmd(dev, 0x00c221, 0x00000040);
-	nv_icmd(dev, 0x00c401, 0x00000001);
-	nv_icmd(dev, 0x00c402, 0x00010001);
-	nv_icmd(dev, 0x00c403, 0x00000001);
-	nv_icmd(dev, 0x00c404, 0x00000001);
-	nv_icmd(dev, 0x00c40e, 0x00000020);
-	nv_icmd(dev, 0x00c500, 0x00000003);
-	nv_icmd(dev, 0x01e100, 0x00000001);
-	nv_icmd(dev, 0x001000, 0x00000001);
-	nv_icmd(dev, 0x000b07, 0x00000002);
-	nv_icmd(dev, 0x000b08, 0x00000100);
-	nv_icmd(dev, 0x000b09, 0x00000100);
-	nv_icmd(dev, 0x000b0a, 0x00000001);
-	nv_icmd(dev, 0x01e100, 0x00000001);
-	nv_wr32(dev, 0x400208, 0x00000000);
-}
-
-static void
-nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
-{
-	nv_wr32(dev, 0x40448c, data);
-	nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
-}
-
-static void
-nve0_grctx_generate_a097(struct drm_device *dev)
-{
-	nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
-	nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
-	nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
-	nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
-	nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
-	nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
-	nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
-	nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
-	nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
-	nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
-	nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
-	nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
-	nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
-	nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
-	nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
-	nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
-	nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
-	nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
-	nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
-	nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
-	nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
-	nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
-	nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
-	nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
-	nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
-	nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
-	nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
-	nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
-	nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
-	nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
-	nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
-	nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
-	nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
-	nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
-	nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
-	nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
-	nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
-	nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
-	nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
-	nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
-	nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
-	nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
-	nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
-	nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
-	nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
-	nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
-	nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
-	nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
-	nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
-	nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
-	nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
-	nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
-	nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
-	nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
-	nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
-	nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
-	nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
-	nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
-	nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
-	nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
-	nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
-	nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
-	nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
-	nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
-	nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
-	nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
-	nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
-	nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
-	nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
-	nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
-	nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
-	nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
-	nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
-	nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
-	nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
-	nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
-	nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
-	nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
-	nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
-	nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
-	nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
-	nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
-	nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
-	nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
-	nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
-	nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
-	nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
-	nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
-	nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
-	nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
-	nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
-	nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
-	nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
-}
-
-static void
-nve0_grctx_generate_902d(struct drm_device *dev)
-{
-	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
-	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
-	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
-	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
-	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
-	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
-	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
-	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
-	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
-	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
-	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
-	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
-	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
-	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
-	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
-	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
-	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
-	nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
-}
-
-static void
-nve0_graph_generate_unk40xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x404010, 0x0);
-	nv_wr32(dev, 0x404014, 0x0);
-	nv_wr32(dev, 0x404018, 0x0);
-	nv_wr32(dev, 0x40401c, 0x0);
-	nv_wr32(dev, 0x404020, 0x0);
-	nv_wr32(dev, 0x404024, 0xe000);
-	nv_wr32(dev, 0x404028, 0x0);
-	nv_wr32(dev, 0x4040a8, 0x0);
-	nv_wr32(dev, 0x4040ac, 0x0);
-	nv_wr32(dev, 0x4040b0, 0x0);
-	nv_wr32(dev, 0x4040b4, 0x0);
-	nv_wr32(dev, 0x4040b8, 0x0);
-	nv_wr32(dev, 0x4040bc, 0x0);
-	nv_wr32(dev, 0x4040c0, 0x0);
-	nv_wr32(dev, 0x4040c4, 0x0);
-	nv_wr32(dev, 0x4040c8, 0xf800008f);
-	nv_wr32(dev, 0x4040d0, 0x0);
-	nv_wr32(dev, 0x4040d4, 0x0);
-	nv_wr32(dev, 0x4040d8, 0x0);
-	nv_wr32(dev, 0x4040dc, 0x0);
-	nv_wr32(dev, 0x4040e0, 0x0);
-	nv_wr32(dev, 0x4040e4, 0x0);
-	nv_wr32(dev, 0x4040e8, 0x1000);
-	nv_wr32(dev, 0x4040f8, 0x0);
-	nv_wr32(dev, 0x404130, 0x0);
-	nv_wr32(dev, 0x404134, 0x0);
-	nv_wr32(dev, 0x404138, 0x20000040);
-	nv_wr32(dev, 0x404150, 0x2e);
-	nv_wr32(dev, 0x404154, 0x400);
-	nv_wr32(dev, 0x404158, 0x200);
-	nv_wr32(dev, 0x404164, 0x55);
-	nv_wr32(dev, 0x4041a0, 0x0);
-	nv_wr32(dev, 0x4041a4, 0x0);
-	nv_wr32(dev, 0x4041a8, 0x0);
-	nv_wr32(dev, 0x4041ac, 0x0);
-	nv_wr32(dev, 0x404200, 0x0);
-	nv_wr32(dev, 0x404204, 0x0);
-	nv_wr32(dev, 0x404208, 0x0);
-	nv_wr32(dev, 0x40420c, 0x0);
-}
-
-static void
-nve0_graph_generate_unk44xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x404404, 0x0);
-	nv_wr32(dev, 0x404408, 0x0);
-	nv_wr32(dev, 0x40440c, 0x0);
-	nv_wr32(dev, 0x404410, 0x0);
-	nv_wr32(dev, 0x404414, 0x0);
-	nv_wr32(dev, 0x404418, 0x0);
-	nv_wr32(dev, 0x40441c, 0x0);
-	nv_wr32(dev, 0x404420, 0x0);
-	nv_wr32(dev, 0x404424, 0x0);
-	nv_wr32(dev, 0x404428, 0x0);
-	nv_wr32(dev, 0x40442c, 0x0);
-	nv_wr32(dev, 0x404430, 0x0);
-	nv_wr32(dev, 0x404434, 0x0);
-	nv_wr32(dev, 0x404438, 0x0);
-	nv_wr32(dev, 0x404460, 0x0);
-	nv_wr32(dev, 0x404464, 0x0);
-	nv_wr32(dev, 0x404468, 0xffffff);
-	nv_wr32(dev, 0x40446c, 0x0);
-	nv_wr32(dev, 0x404480, 0x1);
-	nv_wr32(dev, 0x404498, 0x1);
-}
-
-static void
-nve0_graph_generate_unk46xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x404604, 0x14);
-	nv_wr32(dev, 0x404608, 0x0);
-	nv_wr32(dev, 0x40460c, 0x3fff);
-	nv_wr32(dev, 0x404610, 0x100);
-	nv_wr32(dev, 0x404618, 0x0);
-	nv_wr32(dev, 0x40461c, 0x0);
-	nv_wr32(dev, 0x404620, 0x0);
-	nv_wr32(dev, 0x404624, 0x0);
-	nv_wr32(dev, 0x40462c, 0x0);
-	nv_wr32(dev, 0x404630, 0x0);
-	nv_wr32(dev, 0x404640, 0x0);
-	nv_wr32(dev, 0x404654, 0x0);
-	nv_wr32(dev, 0x404660, 0x0);
-	nv_wr32(dev, 0x404678, 0x0);
-	nv_wr32(dev, 0x40467c, 0x2);
-	nv_wr32(dev, 0x404680, 0x0);
-	nv_wr32(dev, 0x404684, 0x0);
-	nv_wr32(dev, 0x404688, 0x0);
-	nv_wr32(dev, 0x40468c, 0x0);
-	nv_wr32(dev, 0x404690, 0x0);
-	nv_wr32(dev, 0x404694, 0x0);
-	nv_wr32(dev, 0x404698, 0x0);
-	nv_wr32(dev, 0x40469c, 0x0);
-	nv_wr32(dev, 0x4046a0, 0x7f0080);
-	nv_wr32(dev, 0x4046a4, 0x0);
-	nv_wr32(dev, 0x4046a8, 0x0);
-	nv_wr32(dev, 0x4046ac, 0x0);
-	nv_wr32(dev, 0x4046b0, 0x0);
-	nv_wr32(dev, 0x4046b4, 0x0);
-	nv_wr32(dev, 0x4046b8, 0x0);
-	nv_wr32(dev, 0x4046bc, 0x0);
-	nv_wr32(dev, 0x4046c0, 0x0);
-	nv_wr32(dev, 0x4046c8, 0x0);
-	nv_wr32(dev, 0x4046cc, 0x0);
-	nv_wr32(dev, 0x4046d0, 0x0);
-}
-
-static void
-nve0_graph_generate_unk47xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x404700, 0x0);
-	nv_wr32(dev, 0x404704, 0x0);
-	nv_wr32(dev, 0x404708, 0x0);
-	nv_wr32(dev, 0x404718, 0x0);
-	nv_wr32(dev, 0x40471c, 0x0);
-	nv_wr32(dev, 0x404720, 0x0);
-	nv_wr32(dev, 0x404724, 0x0);
-	nv_wr32(dev, 0x404728, 0x0);
-	nv_wr32(dev, 0x40472c, 0x0);
-	nv_wr32(dev, 0x404730, 0x0);
-	nv_wr32(dev, 0x404734, 0x100);
-	nv_wr32(dev, 0x404738, 0x0);
-	nv_wr32(dev, 0x40473c, 0x0);
-	nv_wr32(dev, 0x404744, 0x0);
-	nv_wr32(dev, 0x404748, 0x0);
-	nv_wr32(dev, 0x404754, 0x0);
-}
-
-static void
-nve0_graph_generate_unk58xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x405800, 0xf8000bf);
-	nv_wr32(dev, 0x405830, 0x2180648);
-	nv_wr32(dev, 0x405834, 0x8000000);
-	nv_wr32(dev, 0x405838, 0x0);
-	nv_wr32(dev, 0x405854, 0x0);
-	nv_wr32(dev, 0x405870, 0x1);
-	nv_wr32(dev, 0x405874, 0x1);
-	nv_wr32(dev, 0x405878, 0x1);
-	nv_wr32(dev, 0x40587c, 0x1);
-	nv_wr32(dev, 0x405a00, 0x0);
-	nv_wr32(dev, 0x405a04, 0x0);
-	nv_wr32(dev, 0x405a18, 0x0);
-	nv_wr32(dev, 0x405b00, 0x0);
-	nv_wr32(dev, 0x405b10, 0x1000);
-}
-
-static void
-nve0_graph_generate_unk60xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x406020, 0x4103c1);
-	nv_wr32(dev, 0x406028, 0x1);
-	nv_wr32(dev, 0x40602c, 0x1);
-	nv_wr32(dev, 0x406030, 0x1);
-	nv_wr32(dev, 0x406034, 0x1);
-}
-
-static void
-nve0_graph_generate_unk64xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x4064a8, 0x0);
-	nv_wr32(dev, 0x4064ac, 0x3fff);
-	nv_wr32(dev, 0x4064b4, 0x0);
-	nv_wr32(dev, 0x4064b8, 0x0);
-	nv_wr32(dev, 0x4064c0, 0x801a00f0);
-	nv_wr32(dev, 0x4064c4, 0x192ffff);
-	nv_wr32(dev, 0x4064c8, 0x1800600);
-	nv_wr32(dev, 0x4064cc, 0x0);
-	nv_wr32(dev, 0x4064d0, 0x0);
-	nv_wr32(dev, 0x4064d4, 0x0);
-	nv_wr32(dev, 0x4064d8, 0x0);
-	nv_wr32(dev, 0x4064dc, 0x0);
-	nv_wr32(dev, 0x4064e0, 0x0);
-	nv_wr32(dev, 0x4064e4, 0x0);
-	nv_wr32(dev, 0x4064e8, 0x0);
-	nv_wr32(dev, 0x4064ec, 0x0);
-	nv_wr32(dev, 0x4064fc, 0x22a);
-}
-
-static void
-nve0_graph_generate_unk70xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x407040, 0x0);
-}
-
-static void
-nve0_graph_generate_unk78xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x407804, 0x23);
-	nv_wr32(dev, 0x40780c, 0xa418820);
-	nv_wr32(dev, 0x407810, 0x62080e6);
-	nv_wr32(dev, 0x407814, 0x20398a4);
-	nv_wr32(dev, 0x407818, 0xe629062);
-	nv_wr32(dev, 0x40781c, 0xa418820);
-	nv_wr32(dev, 0x407820, 0xe6);
-	nv_wr32(dev, 0x4078bc, 0x103);
-}
-
-static void
-nve0_graph_generate_unk80xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x408000, 0x0);
-	nv_wr32(dev, 0x408004, 0x0);
-	nv_wr32(dev, 0x408008, 0x30);
-	nv_wr32(dev, 0x40800c, 0x0);
-	nv_wr32(dev, 0x408010, 0x0);
-	nv_wr32(dev, 0x408014, 0x69);
-	nv_wr32(dev, 0x408018, 0xe100e100);
-	nv_wr32(dev, 0x408064, 0x0);
-}
-
-static void
-nve0_graph_generate_unk88xx(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x408800, 0x2802a3c);
-	nv_wr32(dev, 0x408804, 0x40);
-	nv_wr32(dev, 0x408808, 0x1043e005);
-	nv_wr32(dev, 0x408840, 0xb);
-	nv_wr32(dev, 0x408900, 0x3080b801);
-	nv_wr32(dev, 0x408904, 0x62000001);
-	nv_wr32(dev, 0x408908, 0xc8102f);
-	nv_wr32(dev, 0x408980, 0x11d);
-}
-
-static void
-nve0_graph_generate_gpc(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x418380, 0x16);
-	nv_wr32(dev, 0x418400, 0x38004e00);
-	nv_wr32(dev, 0x418404, 0x71e0ffff);
-	nv_wr32(dev, 0x41840c, 0x1008);
-	nv_wr32(dev, 0x418410, 0xfff0fff);
-	nv_wr32(dev, 0x418414, 0x2200fff);
-	nv_wr32(dev, 0x418450, 0x0);
-	nv_wr32(dev, 0x418454, 0x0);
-	nv_wr32(dev, 0x418458, 0x0);
-	nv_wr32(dev, 0x41845c, 0x0);
-	nv_wr32(dev, 0x418460, 0x0);
-	nv_wr32(dev, 0x418464, 0x0);
-	nv_wr32(dev, 0x418468, 0x1);
-	nv_wr32(dev, 0x41846c, 0x0);
-	nv_wr32(dev, 0x418470, 0x0);
-	nv_wr32(dev, 0x418600, 0x1f);
-	nv_wr32(dev, 0x418684, 0xf);
-	nv_wr32(dev, 0x418700, 0x2);
-	nv_wr32(dev, 0x418704, 0x80);
-	nv_wr32(dev, 0x418708, 0x0);
-	nv_wr32(dev, 0x41870c, 0x0);
-	nv_wr32(dev, 0x418710, 0x0);
-	nv_wr32(dev, 0x418800, 0x7006860a);
-	nv_wr32(dev, 0x418808, 0x0);
-	nv_wr32(dev, 0x41880c, 0x0);
-	nv_wr32(dev, 0x418810, 0x0);
-	nv_wr32(dev, 0x418828, 0x44);
-	nv_wr32(dev, 0x418830, 0x10000001);
-	nv_wr32(dev, 0x4188d8, 0x8);
-	nv_wr32(dev, 0x4188e0, 0x1000000);
-	nv_wr32(dev, 0x4188e8, 0x0);
-	nv_wr32(dev, 0x4188ec, 0x0);
-	nv_wr32(dev, 0x4188f0, 0x0);
-	nv_wr32(dev, 0x4188f4, 0x0);
-	nv_wr32(dev, 0x4188f8, 0x0);
-	nv_wr32(dev, 0x4188fc, 0x20100018);
-	nv_wr32(dev, 0x41891c, 0xff00ff);
-	nv_wr32(dev, 0x418924, 0x0);
-	nv_wr32(dev, 0x418928, 0xffff00);
-	nv_wr32(dev, 0x41892c, 0xff00);
-	nv_wr32(dev, 0x418a00, 0x0);
-	nv_wr32(dev, 0x418a04, 0x0);
-	nv_wr32(dev, 0x418a08, 0x0);
-	nv_wr32(dev, 0x418a0c, 0x10000);
-	nv_wr32(dev, 0x418a10, 0x0);
-	nv_wr32(dev, 0x418a14, 0x0);
-	nv_wr32(dev, 0x418a18, 0x0);
-	nv_wr32(dev, 0x418a20, 0x0);
-	nv_wr32(dev, 0x418a24, 0x0);
-	nv_wr32(dev, 0x418a28, 0x0);
-	nv_wr32(dev, 0x418a2c, 0x10000);
-	nv_wr32(dev, 0x418a30, 0x0);
-	nv_wr32(dev, 0x418a34, 0x0);
-	nv_wr32(dev, 0x418a38, 0x0);
-	nv_wr32(dev, 0x418a40, 0x0);
-	nv_wr32(dev, 0x418a44, 0x0);
-	nv_wr32(dev, 0x418a48, 0x0);
-	nv_wr32(dev, 0x418a4c, 0x10000);
-	nv_wr32(dev, 0x418a50, 0x0);
-	nv_wr32(dev, 0x418a54, 0x0);
-	nv_wr32(dev, 0x418a58, 0x0);
-	nv_wr32(dev, 0x418a60, 0x0);
-	nv_wr32(dev, 0x418a64, 0x0);
-	nv_wr32(dev, 0x418a68, 0x0);
-	nv_wr32(dev, 0x418a6c, 0x10000);
-	nv_wr32(dev, 0x418a70, 0x0);
-	nv_wr32(dev, 0x418a74, 0x0);
-	nv_wr32(dev, 0x418a78, 0x0);
-	nv_wr32(dev, 0x418a80, 0x0);
-	nv_wr32(dev, 0x418a84, 0x0);
-	nv_wr32(dev, 0x418a88, 0x0);
-	nv_wr32(dev, 0x418a8c, 0x10000);
-	nv_wr32(dev, 0x418a90, 0x0);
-	nv_wr32(dev, 0x418a94, 0x0);
-	nv_wr32(dev, 0x418a98, 0x0);
-	nv_wr32(dev, 0x418aa0, 0x0);
-	nv_wr32(dev, 0x418aa4, 0x0);
-	nv_wr32(dev, 0x418aa8, 0x0);
-	nv_wr32(dev, 0x418aac, 0x10000);
-	nv_wr32(dev, 0x418ab0, 0x0);
-	nv_wr32(dev, 0x418ab4, 0x0);
-	nv_wr32(dev, 0x418ab8, 0x0);
-	nv_wr32(dev, 0x418ac0, 0x0);
-	nv_wr32(dev, 0x418ac4, 0x0);
-	nv_wr32(dev, 0x418ac8, 0x0);
-	nv_wr32(dev, 0x418acc, 0x10000);
-	nv_wr32(dev, 0x418ad0, 0x0);
-	nv_wr32(dev, 0x418ad4, 0x0);
-	nv_wr32(dev, 0x418ad8, 0x0);
-	nv_wr32(dev, 0x418ae0, 0x0);
-	nv_wr32(dev, 0x418ae4, 0x0);
-	nv_wr32(dev, 0x418ae8, 0x0);
-	nv_wr32(dev, 0x418aec, 0x10000);
-	nv_wr32(dev, 0x418af0, 0x0);
-	nv_wr32(dev, 0x418af4, 0x0);
-	nv_wr32(dev, 0x418af8, 0x0);
-	nv_wr32(dev, 0x418b00, 0x6);
-	nv_wr32(dev, 0x418b08, 0xa418820);
-	nv_wr32(dev, 0x418b0c, 0x62080e6);
-	nv_wr32(dev, 0x418b10, 0x20398a4);
-	nv_wr32(dev, 0x418b14, 0xe629062);
-	nv_wr32(dev, 0x418b18, 0xa418820);
-	nv_wr32(dev, 0x418b1c, 0xe6);
-	nv_wr32(dev, 0x418bb8, 0x103);
-	nv_wr32(dev, 0x418c08, 0x1);
-	nv_wr32(dev, 0x418c10, 0x0);
-	nv_wr32(dev, 0x418c14, 0x0);
-	nv_wr32(dev, 0x418c18, 0x0);
-	nv_wr32(dev, 0x418c1c, 0x0);
-	nv_wr32(dev, 0x418c20, 0x0);
-	nv_wr32(dev, 0x418c24, 0x0);
-	nv_wr32(dev, 0x418c28, 0x0);
-	nv_wr32(dev, 0x418c2c, 0x0);
-	nv_wr32(dev, 0x418c40, 0xffffffff);
-	nv_wr32(dev, 0x418c6c, 0x1);
-	nv_wr32(dev, 0x418c80, 0x20200004);
-	nv_wr32(dev, 0x418c8c, 0x1);
-	nv_wr32(dev, 0x419000, 0x780);
-	nv_wr32(dev, 0x419004, 0x0);
-	nv_wr32(dev, 0x419008, 0x0);
-	nv_wr32(dev, 0x419014, 0x4);
-}
-
-static void
-nve0_graph_generate_tpc(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x419848, 0x0);
-	nv_wr32(dev, 0x419864, 0x129);
-	nv_wr32(dev, 0x419888, 0x0);
-	nv_wr32(dev, 0x419a00, 0xf0);
-	nv_wr32(dev, 0x419a04, 0x1);
-	nv_wr32(dev, 0x419a08, 0x21);
-	nv_wr32(dev, 0x419a0c, 0x20000);
-	nv_wr32(dev, 0x419a10, 0x0);
-	nv_wr32(dev, 0x419a14, 0x200);
-	nv_wr32(dev, 0x419a1c, 0xc000);
-	nv_wr32(dev, 0x419a20, 0x800);
-	nv_wr32(dev, 0x419a30, 0x1);
-	nv_wr32(dev, 0x419ac4, 0x37f440);
-	nv_wr32(dev, 0x419c00, 0xa);
-	nv_wr32(dev, 0x419c04, 0x80000006);
-	nv_wr32(dev, 0x419c08, 0x2);
-	nv_wr32(dev, 0x419c20, 0x0);
-	nv_wr32(dev, 0x419c24, 0x84210);
-	nv_wr32(dev, 0x419c28, 0x3efbefbe);
-	nv_wr32(dev, 0x419ce8, 0x0);
-	nv_wr32(dev, 0x419cf4, 0x3203);
-	nv_wr32(dev, 0x419e04, 0x0);
-	nv_wr32(dev, 0x419e08, 0x0);
-	nv_wr32(dev, 0x419e0c, 0x0);
-	nv_wr32(dev, 0x419e10, 0x402);
-	nv_wr32(dev, 0x419e44, 0x13eff2);
-	nv_wr32(dev, 0x419e48, 0x0);
-	nv_wr32(dev, 0x419e4c, 0x7f);
-	nv_wr32(dev, 0x419e50, 0x0);
-	nv_wr32(dev, 0x419e54, 0x0);
-	nv_wr32(dev, 0x419e58, 0x0);
-	nv_wr32(dev, 0x419e5c, 0x0);
-	nv_wr32(dev, 0x419e60, 0x0);
-	nv_wr32(dev, 0x419e64, 0x0);
-	nv_wr32(dev, 0x419e68, 0x0);
-	nv_wr32(dev, 0x419e6c, 0x0);
-	nv_wr32(dev, 0x419e70, 0x0);
-	nv_wr32(dev, 0x419e74, 0x0);
-	nv_wr32(dev, 0x419e78, 0x0);
-	nv_wr32(dev, 0x419e7c, 0x0);
-	nv_wr32(dev, 0x419e80, 0x0);
-	nv_wr32(dev, 0x419e84, 0x0);
-	nv_wr32(dev, 0x419e88, 0x0);
-	nv_wr32(dev, 0x419e8c, 0x0);
-	nv_wr32(dev, 0x419e90, 0x0);
-	nv_wr32(dev, 0x419e94, 0x0);
-	nv_wr32(dev, 0x419e98, 0x0);
-	nv_wr32(dev, 0x419eac, 0x1fcf);
-	nv_wr32(dev, 0x419eb0, 0xd3f);
-	nv_wr32(dev, 0x419ec8, 0x1304f);
-	nv_wr32(dev, 0x419f30, 0x0);
-	nv_wr32(dev, 0x419f34, 0x0);
-	nv_wr32(dev, 0x419f38, 0x0);
-	nv_wr32(dev, 0x419f3c, 0x0);
-	nv_wr32(dev, 0x419f40, 0x0);
-	nv_wr32(dev, 0x419f44, 0x0);
-	nv_wr32(dev, 0x419f48, 0x0);
-	nv_wr32(dev, 0x419f4c, 0x0);
-	nv_wr32(dev, 0x419f58, 0x0);
-	nv_wr32(dev, 0x419f78, 0xb);
-}
-
-static void
-nve0_graph_generate_tpcunk(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x41be24, 0x6);
-	nv_wr32(dev, 0x41bec0, 0x12180000);
-	nv_wr32(dev, 0x41bec4, 0x37f7f);
-	nv_wr32(dev, 0x41bee4, 0x6480430);
-	nv_wr32(dev, 0x41bf00, 0xa418820);
-	nv_wr32(dev, 0x41bf04, 0x62080e6);
-	nv_wr32(dev, 0x41bf08, 0x20398a4);
-	nv_wr32(dev, 0x41bf0c, 0xe629062);
-	nv_wr32(dev, 0x41bf10, 0xa418820);
-	nv_wr32(dev, 0x41bf14, 0xe6);
-	nv_wr32(dev, 0x41bfd0, 0x900103);
-	nv_wr32(dev, 0x41bfe0, 0x400001);
-	nv_wr32(dev, 0x41bfe4, 0x0);
-}
-
-int
-nve0_grctx_generate(struct nouveau_channel *chan)
-{
-	struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
-	struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
-	struct drm_device *dev = chan->dev;
-	u32 data[6] = {}, data2[2] = {}, tmp;
-	u32 tpc_set = 0, tpc_mask = 0;
-	u8 tpcnr[GPC_MAX], a, b;
-	u8 shift, ntpcv;
-	int i, gpc, tpc, id;
-
-	nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x400204, 0x00000000);
-	nv_wr32(dev, 0x400208, 0x00000000);
-
-	nve0_graph_generate_unk40xx(dev);
-	nve0_graph_generate_unk44xx(dev);
-	nve0_graph_generate_unk46xx(dev);
-	nve0_graph_generate_unk47xx(dev);
-	nve0_graph_generate_unk58xx(dev);
-	nve0_graph_generate_unk60xx(dev);
-	nve0_graph_generate_unk64xx(dev);
-	nve0_graph_generate_unk70xx(dev);
-	nve0_graph_generate_unk78xx(dev);
-	nve0_graph_generate_unk80xx(dev);
-	nve0_graph_generate_unk88xx(dev);
-	nve0_graph_generate_gpc(dev);
-	nve0_graph_generate_tpc(dev);
-	nve0_graph_generate_tpcunk(dev);
-
-	nv_wr32(dev, 0x404154, 0x0);
-
-	for (i = 0; i < grch->mmio_nr * 8; i += 8) {
-		u32 reg = nv_ro32(grch->mmio, i + 0);
-		u32 val = nv_ro32(grch->mmio, i + 4);
-		nv_wr32(dev, reg, val);
-	}
-
-	nv_wr32(dev, 0x418c6c, 0x1);
-	nv_wr32(dev, 0x41980c, 0x10);
-	nv_wr32(dev, 0x41be08, 0x4);
-	nv_wr32(dev, 0x4064c0, 0x801a00f0);
-	nv_wr32(dev, 0x405800, 0xf8000bf);
-	nv_wr32(dev, 0x419c00, 0xa);
-
-	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
-				nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
-				nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
-			}
-
-			nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
-			nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
-		}
-	}
-
-	tmp = 0;
-	for (i = 0; i < priv->gpc_nr; i++)
-		tmp |= priv->tpc_nr[i] << (i * 4);
-	nv_wr32(dev, 0x406028, tmp);
-	nv_wr32(dev, 0x405870, tmp);
-
-	nv_wr32(dev, 0x40602c, 0x0);
-	nv_wr32(dev, 0x405874, 0x0);
-	nv_wr32(dev, 0x406030, 0x0);
-	nv_wr32(dev, 0x405878, 0x0);
-	nv_wr32(dev, 0x406034, 0x0);
-	nv_wr32(dev, 0x40587c, 0x0);
-
-	/* calculate first set of magics */
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-
-	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
-		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpcnr[gpc]--;
-
-		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
-	}
-
-	for (; tpc < 32; tpc++)
-		data[tpc / 6] |= 7 << ((tpc % 6) * 5);
-
-	/* and the second... */
-	shift = 0;
-	ntpcv = priv->tpc_total;
-	while (!(ntpcv & (1 << 4))) {
-		ntpcv <<= 1;
-		shift++;
-	}
-
-	data2[0]  = ntpcv << 16;
-	data2[0] |= shift << 21;
-	data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
-	data2[0] |= priv->tpc_total << 8;
-	data2[0] |= priv->magic_not_rop_nr;
-	for (i = 1; i < 7; i++)
-		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
-
-	/* and write it all the various parts of PGRAPH */
-	nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
-	for (i = 0; i < 6; i++)
-		nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
-
-	nv_wr32(dev, 0x41bfd0, data2[0]);
-	nv_wr32(dev, 0x41bfe4, data2[1]);
-	for (i = 0; i < 6; i++)
-		nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);
-
-	nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
-	for (i = 0; i < 6; i++)
-		nv_wr32(dev, 0x40780c + (i * 4), data[i]);
-
-
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++)
-		tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
-
-	for (i = 0, gpc = -1, b = -1; i < 32; i++) {
-		a = (i * (priv->tpc_total - 1)) / 32;
-		if (a != b) {
-			b = a;
-			do {
-				gpc = (gpc + 1) % priv->gpc_nr;
-			} while (!tpcnr[gpc]);
-			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
-
-			tpc_set |= 1 << ((gpc * 8) + tpc);
-		}
-
-		nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
-		nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
-	}
-
-	for (i = 0; i < 8; i++)
-		nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);
-
-	nv_wr32(dev, 0x405b00, 0x201);
-	nv_wr32(dev, 0x408850, 0x2);
-	nv_wr32(dev, 0x408958, 0x2);
-	nv_wr32(dev, 0x419f78, 0xa);
-
-	nve0_grctx_generate_icmd(dev);
-	nve0_grctx_generate_a097(dev);
-	nve0_grctx_generate_902d(dev);
-
-	nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
-	nv_wr32(dev, 0x418800, 0x7026860a); //XXX
-	nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
-	return 0;
-}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2817101fb167..96184d02c8d9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -83,25 +83,19 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	ENABLE_SCALER_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
-
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
 	/* fixme - fill in enc_priv for atom dac */
 	enum radeon_tv_std tv_std = TV_STD_NTSC;
 	bool is_tv = false, is_cv = false;
-	struct drm_encoder *encoder;
 
 	if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
 		return;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		/* find tv std */
-		if (encoder->crtc == crtc) {
-			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
-				struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
-				tv_std = tv_dac->tv_std;
-				is_tv = true;
-			}
-		}
+	if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+		tv_std = tv_dac->tv_std;
+		is_tv = true;
 	}
 
 	memset(&args, 0, sizeof(args));
@@ -533,99 +527,87 @@ union adjust_pixel_clock {
 };
 
 static u32 atombios_adjust_pll(struct drm_crtc *crtc,
-			       struct drm_display_mode *mode,
-			       struct radeon_pll *pll,
-			       bool ss_enabled,
-			       struct radeon_atom_ss *ss)
+			       struct drm_display_mode *mode)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct drm_encoder *encoder = NULL;
-	struct radeon_encoder *radeon_encoder = NULL;
-	struct drm_connector *connector = NULL;
+	struct drm_encoder *encoder = radeon_crtc->encoder;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	u32 adjusted_clock = mode->clock;
-	int encoder_mode = 0;
+	int encoder_mode = atombios_get_encoder_mode(encoder);
 	u32 dp_clock = mode->clock;
-	int bpc = 8;
-	bool is_duallink = false;
+	int bpc = radeon_get_monitor_bpc(connector);
+	bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
 
 	/* reset the pll flags */
-	pll->flags = 0;
+	radeon_crtc->pll_flags = 0;
 
 	if (ASIC_IS_AVIVO(rdev)) {
 		if ((rdev->family == CHIP_RS600) ||
 		    (rdev->family == CHIP_RS690) ||
 		    (rdev->family == CHIP_RS740))
-			pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
-				       RADEON_PLL_PREFER_CLOSEST_LOWER);
+			radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
+				RADEON_PLL_PREFER_CLOSEST_LOWER);
 
 		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */
-			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
-			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
 		if (rdev->family < CHIP_RV770)
-			pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 		/* use frac fb div on APUs */
 		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
-			pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 	} else {
-		pll->flags |= RADEON_PLL_LEGACY;
+		radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
 
 		if (mode->clock > 200000)	/* range limits??? */
-			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
-			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 	}
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc == crtc) {
-			radeon_encoder = to_radeon_encoder(encoder);
-			connector = radeon_get_connector_for_encoder(encoder);
-			bpc = radeon_get_monitor_bpc(connector);
-			encoder_mode = atombios_get_encoder_mode(encoder);
-			is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
-			if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
-			    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
-				if (connector) {
-					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-					struct radeon_connector_atom_dig *dig_connector =
-						radeon_connector->con_priv;
-
-					dp_clock = dig_connector->dp_clock;
-				}
-			}
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+		if (connector) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			struct radeon_connector_atom_dig *dig_connector =
+				radeon_connector->con_priv;
 
-			/* use recommended ref_div for ss */
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-				if (ss_enabled) {
-					if (ss->refdiv) {
-						pll->flags |= RADEON_PLL_USE_REF_DIV;
-						pll->reference_div = ss->refdiv;
-						if (ASIC_IS_AVIVO(rdev))
-							pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
-					}
-				}
-			}
+			dp_clock = dig_connector->dp_clock;
+		}
+	}
 
-			if (ASIC_IS_AVIVO(rdev)) {
-				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
-				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
-					adjusted_clock = mode->clock * 2;
-				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-					pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
-				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-					pll->flags |= RADEON_PLL_IS_LCD;
-			} else {
-				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
-					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
-				if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
-					pll->flags |= RADEON_PLL_USE_REF_DIV;
+	/* use recommended ref_div for ss */
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+		if (radeon_crtc->ss_enabled) {
+			if (radeon_crtc->ss.refdiv) {
+				radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+				radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
+				if (ASIC_IS_AVIVO(rdev))
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 			}
-			break;
 		}
 	}
 
+	if (ASIC_IS_AVIVO(rdev)) {
+		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+			adjusted_clock = mode->clock * 2;
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
+	} else {
+		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+			radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+		if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+	}
+
 	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
 	 * accordingly based on the encoder/transmitter to work around
 	 * special hw requirements.
@@ -650,7 +632,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 				args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
 				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
 				args.v1.ucEncodeMode = encoder_mode;
-				if (ss_enabled && ss->percentage)
+				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
 					args.v1.ucConfig |=
 						ADJUST_DISPLAY_CONFIG_SS_ENABLE;
 
@@ -663,7 +645,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 				args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
 				args.v3.sInput.ucEncodeMode = encoder_mode;
 				args.v3.sInput.ucDispPllConfig = 0;
-				if (ss_enabled && ss->percentage)
+				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
 					args.v3.sInput.ucDispPllConfig |=
 						DISPPLL_CONFIG_SS_ENABLE;
 				if (ENCODER_MODE_IS_DP(encoder_mode)) {
@@ -695,14 +677,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 						   index, (uint32_t *)&args);
 				adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
 				if (args.v3.sOutput.ucRefDiv) {
-					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
-					pll->flags |= RADEON_PLL_USE_REF_DIV;
-					pll->reference_div = args.v3.sOutput.ucRefDiv;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+					radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
 				}
 				if (args.v3.sOutput.ucPostDiv) {
-					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
-					pll->flags |= RADEON_PLL_USE_POST_DIV;
-					pll->post_div = args.v3.sOutput.ucPostDiv;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
+					radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
 				}
 				break;
 			default:
@@ -837,7 +819,10 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
 			args.v3.ucFracFbDiv = frac_fb_div;
 			args.v3.ucPostDiv = post_div;
 			args.v3.ucPpll = pll_id;
-			args.v3.ucMiscInfo = (pll_id << 2);
+			if (crtc_id == ATOM_CRTC2)
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+			else
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
 			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
 				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
 			args.v3.ucTransmitterId = encoder_id;
@@ -907,58 +892,29 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
-static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct drm_encoder *encoder = NULL;
-	struct radeon_encoder *radeon_encoder = NULL;
-	u32 pll_clock = mode->clock;
-	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
-	struct radeon_pll *pll;
-	u32 adjusted_clock;
-	int encoder_mode = 0;
-	struct radeon_atom_ss ss;
-	bool ss_enabled = false;
-	int bpc = 8;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc == crtc) {
-			radeon_encoder = to_radeon_encoder(encoder);
-			encoder_mode = atombios_get_encoder_mode(encoder);
-			break;
-		}
-	}
-
-	if (!radeon_encoder)
-		return;
-
-	switch (radeon_crtc->pll_id) {
-	case ATOM_PPLL1:
-		pll = &rdev->clock.p1pll;
-		break;
-	case ATOM_PPLL2:
-		pll = &rdev->clock.p2pll;
-		break;
-	case ATOM_DCPLL:
-	case ATOM_PPLL_INVALID:
-	default:
-		pll = &rdev->clock.dcpll;
-		break;
-	}
+	radeon_crtc->bpc = 8;
+	radeon_crtc->ss_enabled = false;
 
 	if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
-	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+	    (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
 		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 		struct drm_connector *connector =
-			radeon_get_connector_for_encoder(encoder);
+			radeon_get_connector_for_encoder(radeon_crtc->encoder);
 		struct radeon_connector *radeon_connector =
 			to_radeon_connector(connector);
 		struct radeon_connector_atom_dig *dig_connector =
 			radeon_connector->con_priv;
 		int dp_clock;
-		bpc = radeon_get_monitor_bpc(connector);
+		radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
 
 		switch (encoder_mode) {
 		case ATOM_ENCODER_MODE_DP_MST:
@@ -966,45 +922,54 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 			/* DP/eDP */
 			dp_clock = dig_connector->dp_clock / 10;
 			if (ASIC_IS_DCE4(rdev))
-				ss_enabled =
-					radeon_atombios_get_asic_ss_info(rdev, &ss,
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
 									 ASIC_INTERNAL_SS_ON_DP,
 									 dp_clock);
 			else {
 				if (dp_clock == 16200) {
-					ss_enabled =
-						radeon_atombios_get_ppll_ss_info(rdev, &ss,
+					radeon_crtc->ss_enabled =
+						radeon_atombios_get_ppll_ss_info(rdev,
+										 &radeon_crtc->ss,
 										 ATOM_DP_SS_ID2);
-					if (!ss_enabled)
-						ss_enabled =
-							radeon_atombios_get_ppll_ss_info(rdev, &ss,
+					if (!radeon_crtc->ss_enabled)
+						radeon_crtc->ss_enabled =
+							radeon_atombios_get_ppll_ss_info(rdev,
+											 &radeon_crtc->ss,
 											 ATOM_DP_SS_ID1);
 				} else
-					ss_enabled =
-						radeon_atombios_get_ppll_ss_info(rdev, &ss,
+					radeon_crtc->ss_enabled =
+						radeon_atombios_get_ppll_ss_info(rdev,
+										 &radeon_crtc->ss,
 										 ATOM_DP_SS_ID1);
 			}
 			break;
 		case ATOM_ENCODER_MODE_LVDS:
 			if (ASIC_IS_DCE4(rdev))
-				ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
-									      dig->lcd_ss_id,
-									      mode->clock / 10);
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 dig->lcd_ss_id,
+									 mode->clock / 10);
 			else
-				ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss,
-									      dig->lcd_ss_id);
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_ppll_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 dig->lcd_ss_id);
 			break;
 		case ATOM_ENCODER_MODE_DVI:
 			if (ASIC_IS_DCE4(rdev))
-				ss_enabled =
-					radeon_atombios_get_asic_ss_info(rdev, &ss,
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
 									 ASIC_INTERNAL_SS_ON_TMDS,
 									 mode->clock / 10);
 			break;
 		case ATOM_ENCODER_MODE_HDMI:
 			if (ASIC_IS_DCE4(rdev))
-				ss_enabled =
-					radeon_atombios_get_asic_ss_info(rdev, &ss,
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
 									 ASIC_INTERNAL_SS_ON_HDMI,
 									 mode->clock / 10);
 			break;
@@ -1014,43 +979,80 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 	}
 
 	/* adjust pixel clock as needed */
-	adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
+	radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
+
+	return true;
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	u32 pll_clock = mode->clock;
+	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+	struct radeon_pll *pll;
+	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+	switch (radeon_crtc->pll_id) {
+	case ATOM_PPLL1:
+		pll = &rdev->clock.p1pll;
+		break;
+	case ATOM_PPLL2:
+		pll = &rdev->clock.p2pll;
+		break;
+	case ATOM_DCPLL:
+	case ATOM_PPLL_INVALID:
+	default:
+		pll = &rdev->clock.dcpll;
+		break;
+	}
+
+	/* update pll params */
+	pll->flags = radeon_crtc->pll_flags;
+	pll->reference_div = radeon_crtc->pll_reference_div;
+	pll->post_div = radeon_crtc->pll_post_div;
 
 	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
 		/* TV seems to prefer the legacy algo on some boards */
-		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-					  &ref_div, &post_div);
+		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					  &fb_div, &frac_fb_div, &ref_div, &post_div);
 	else if (ASIC_IS_AVIVO(rdev))
-		radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-					 &ref_div, &post_div);
+		radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					 &fb_div, &frac_fb_div, &ref_div, &post_div);
 	else
-		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-					  &ref_div, &post_div);
+		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					  &fb_div, &frac_fb_div, &ref_div, &post_div);
 
-	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
+	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
+				 radeon_crtc->crtc_id, &radeon_crtc->ss);
 
 	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
 				  encoder_mode, radeon_encoder->encoder_id, mode->clock,
-				  ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss);
+				  ref_div, fb_div, frac_fb_div, post_div,
+				  radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
 
-	if (ss_enabled) {
+	if (radeon_crtc->ss_enabled) {
 		/* calculate ss amount and step size */
 		if (ASIC_IS_DCE4(rdev)) {
 			u32 step_size;
-			u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
-			ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
-			ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+			u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+			radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+			radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
 				ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
-			if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
-				step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
+			if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+				step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
 					(125 * 25 * pll->reference_freq / 100);
 			else
-				step_size = (2 * amount * ref_div * (ss.rate * 2048)) /
+				step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
 					(125 * 25 * pll->reference_freq / 100);
-			ss.step = step_size;
+			radeon_crtc->ss.step = step_size;
 		}
 
-		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
+		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
+					 radeon_crtc->crtc_id, &radeon_crtc->ss);
 	}
 }
 
@@ -1479,85 +1481,251 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
 	}
 }
 
+/**
+ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+	u32 pll_in_use = 0;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+			pll_in_use |= (1 << test_radeon_crtc->pll_id);
+	}
+	return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode.  For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->encoder &&
+		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* for DP use the same PLL for all */
+			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+				return test_radeon_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
+ *
+ * @crtc: drm crtc
+ * @encoder: drm encoder
+ *
+ * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
+ * be shared (i.e., same clock).
+ */
+static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+	u32 adjusted_clock, test_adjusted_clock;
+
+	adjusted_clock = radeon_crtc->adjusted_clock;
+
+	if (adjusted_clock == 0)
+		return ATOM_PPLL_INVALID;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->encoder &&
+		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* check if we are already driving this connector with another crtc */
+			if (test_radeon_crtc->connector == radeon_crtc->connector) {
+				/* if we are, return that pll */
+				if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+					return test_radeon_crtc->pll_id;
+			}
+			/* for non-DP check the clock */
+			test_adjusted_clock = test_radeon_crtc->adjusted_clock;
+			if ((crtc->mode.clock == test_crtc->mode.clock) &&
+			    (adjusted_clock == test_adjusted_clock) &&
+			    (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+			    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+				return test_radeon_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
 static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct drm_encoder *test_encoder;
-	struct drm_crtc *test_crtc;
-	uint32_t pll_in_use = 0;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	u32 pll_in_use;
+	int pll;
 
 	if (ASIC_IS_DCE61(rdev)) {
-		list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
-			if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
-				struct radeon_encoder *test_radeon_encoder =
-					to_radeon_encoder(test_encoder);
-				struct radeon_encoder_atom_dig *dig =
-					test_radeon_encoder->enc_priv;
-
-				if ((test_radeon_encoder->encoder_id ==
-				     ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
-				    (dig->linkb == false)) /* UNIPHY A uses PPLL2 */
-					return ATOM_PPLL2;
+		struct radeon_encoder_atom_dig *dig =
+			radeon_encoder->enc_priv;
+
+		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
+		    (dig->linkb == false))
+			/* UNIPHY A uses PPLL2 */
+			return ATOM_PPLL2;
+		else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			/* UNIPHY B/C/D/E/F */
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
 			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
 		}
 		/* UNIPHY B/C/D/E/F */
-		list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
-			struct radeon_crtc *radeon_test_crtc;
-
-			if (crtc == test_crtc)
-				continue;
-
-			radeon_test_crtc = to_radeon_crtc(test_crtc);
-			if ((radeon_test_crtc->pll_id == ATOM_PPLL0) ||
-			    (radeon_test_crtc->pll_id == ATOM_PPLL1))
-				pll_in_use |= (1 << radeon_test_crtc->pll_id);
-		}
-		if (!(pll_in_use & 4))
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL0)))
 			return ATOM_PPLL0;
-		return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
 	} else if (ASIC_IS_DCE4(rdev)) {
-		list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
-			if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
-				/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
-				 * depending on the asic:
-				 * DCE4: PPLL or ext clock
-				 * DCE5: DCPLL or ext clock
-				 *
-				 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
-				 * PPLL/DCPLL programming and only program the DP DTO for the
-				 * crtc virtual pixel clock.
-				 */
-				if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
-					if (rdev->clock.dp_extclk)
-						return ATOM_PPLL_INVALID;
-					else if (ASIC_IS_DCE6(rdev))
-						return ATOM_PPLL0;
-					else if (ASIC_IS_DCE5(rdev))
-						return ATOM_DCPLL;
-				}
+		/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+		 * depending on the asic:
+		 * DCE4: PPLL or ext clock
+		 * DCE5: PPLL, DCPLL, or ext clock
+		 * DCE6: PPLL, PPLL0, or ext clock
+		 *
+		 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+		 * PPLL/DCPLL programming and only program the DP DTO for the
+		 * crtc virtual pixel clock.
+		 */
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else if (ASIC_IS_DCE6(rdev))
+				/* use PPLL0 for all DP */
+				return ATOM_PPLL0;
+			else if (ASIC_IS_DCE5(rdev))
+				/* use DCPLL for all DP */
+				return ATOM_DCPLL;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
 			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
 		}
-
-		/* otherwise, pick one of the plls */
-		list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
-			struct radeon_crtc *radeon_test_crtc;
-
-			if (crtc == test_crtc)
-				continue;
-
-			radeon_test_crtc = to_radeon_crtc(test_crtc);
-			if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
-			    (radeon_test_crtc->pll_id <= ATOM_PPLL2))
-				pll_in_use |= (1 << radeon_test_crtc->pll_id);
-		}
-		if (!(pll_in_use & 1))
+		/* all other cases */
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
 			return ATOM_PPLL1;
-		return ATOM_PPLL2;
-	} else
-		return radeon_crtc->crtc_id;
-
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else {
+		if (ASIC_IS_AVIVO(rdev)) {
+			/* in DP mode, the DP ref clock can come from either PPLL
+			 * depending on the asic:
+			 * DCE3: PPLL1 or PPLL2
+			 */
+			if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			} else {
+				/* use the same PPLL for all monitors with the same clock */
+				pll = radeon_get_shared_nondp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			}
+			/* all other cases */
+			pll_in_use = radeon_get_pll_use_mask(crtc);
+			if (!(pll_in_use & (1 << ATOM_PPLL2)))
+				return ATOM_PPLL2;
+			if (!(pll_in_use & (1 << ATOM_PPLL1)))
+				return ATOM_PPLL1;
+			DRM_ERROR("unable to allocate a PPLL\n");
+			return ATOM_PPLL_INVALID;
+		} else {
+			/* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+			return radeon_crtc->crtc_id;
+		}
+	}
 }
 
 void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
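
The helpers above reduce PPLL management to a small allocator: build a bitmask of PLL ids other crtcs already hold, try to reuse a compatible one (any other DP crtc, or a non-DP crtc with the same adjusted clock), and otherwise hand out the first free id, falling back to ATOM_PPLL_INVALID so the caller skips PLL programming. A minimal standalone sketch of that idea follows; the FAKE_* names and struct fake_crtc are invented placeholders, not radeon structures.

/* Minimal sketch (not the driver code) of the PPLL sharing scheme used by
 * radeon_get_pll_use_mask()/radeon_atom_pick_pll() above. */
#include <stdbool.h>

enum { FAKE_PPLL1, FAKE_PPLL2, FAKE_PPLL_INVALID = 0xff };

struct fake_crtc {
	int pll_id;			/* FAKE_PPLL_INVALID when unused */
	bool is_dp;
	unsigned int adjusted_clock;	/* 0 when the crtc is off */
};

unsigned int fake_pll_use_mask(const struct fake_crtc *crtcs, int n, int self)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < n; i++)
		if (i != self && crtcs[i].pll_id != FAKE_PPLL_INVALID)
			mask |= 1u << crtcs[i].pll_id;
	return mask;
}

int fake_pick_pll(const struct fake_crtc *crtcs, int n, int self)
{
	unsigned int in_use;
	int i;

	/* DP crtcs may all share one PPLL; non-DP crtcs may share only when
	 * the adjusted clock matches exactly. */
	for (i = 0; i < n; i++) {
		if (i == self || crtcs[i].pll_id == FAKE_PPLL_INVALID)
			continue;
		if (crtcs[self].is_dp && crtcs[i].is_dp)
			return crtcs[i].pll_id;
		if (!crtcs[self].is_dp && !crtcs[i].is_dp &&
		    crtcs[i].adjusted_clock &&
		    crtcs[i].adjusted_clock == crtcs[self].adjusted_clock)
			return crtcs[i].pll_id;
	}

	/* otherwise hand out the first PPLL nobody else holds */
	in_use = fake_pll_use_mask(crtcs, n, self);
	if (!(in_use & (1u << FAKE_PPLL2)))
		return FAKE_PPLL2;
	if (!(in_use & (1u << FAKE_PPLL1)))
		return FAKE_PPLL1;
	return FAKE_PPLL_INVALID;	/* caller then skips PLL programming */
}
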
@@ -1588,18 +1756,13 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
 	bool is_tvcv = false;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		/* find tv std */
-		if (encoder->crtc == crtc) {
-			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-			if (radeon_encoder->active_device &
-			    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-				is_tvcv = true;
-		}
-	}
+	if (radeon_encoder->active_device &
+	    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+		is_tvcv = true;
 
 	atombios_crtc_set_pll(crtc, adjusted_mode);
 
@@ -1626,8 +1789,34 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 				     const struct drm_display_mode *mode,
 				     struct drm_display_mode *adjusted_mode)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	/* assign the encoder to the radeon crtc to avoid repeated lookups later */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			radeon_crtc->encoder = encoder;
+			radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
+			break;
+		}
+	}
+	if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
+		radeon_crtc->encoder = NULL;
+		radeon_crtc->connector = NULL;
+		return false;
+	}
 	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
 		return false;
+	if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
+		return false;
+	/* pick pll */
+	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+	/* if we can't get a PPLL for a non-DP encoder, fail */
+	if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
+	    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
+		return false;
+
 	return true;
 }
 
@@ -1638,8 +1827,6 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
 	struct radeon_device *rdev = dev->dev_private;
 
 	radeon_crtc->in_mode_set = true;
-	/* pick pll */
-	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
 
 	/* disable crtc pair power gating before programming */
 	if (ASIC_IS_DCE6(rdev))
@@ -1697,7 +1884,10 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
 		break;
 	}
 done:
-	radeon_crtc->pll_id = -1;
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+	radeon_crtc->adjusted_clock = 0;
+	radeon_crtc->encoder = NULL;
+	radeon_crtc->connector = NULL;
 }
 
 static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -1746,6 +1936,9 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
 		else
 			radeon_crtc->crtc_offset = 0;
 	}
-	radeon_crtc->pll_id = -1;
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+	radeon_crtc->adjusted_clock = 0;
+	radeon_crtc->encoder = NULL;
+	radeon_crtc->connector = NULL;
 	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
 }
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index ea8e2d471c35..d5699fe4f1e8 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -653,9 +653,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
 		return false;
 	}
 
-	DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
-		  link_status[0], link_status[1], link_status[2],
-		  link_status[3], link_status[4], link_status[5]);
+	DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
 	return true;
 }
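
The DRM_DEBUG_KMS change above uses the kernel's %*ph printk extension, which hex-dumps a small buffer (the field-width argument gives the byte count, up to 64), so the six explicit %02x conversions collapse into one specifier. A tiny sketch of the same format, with a made-up buffer:

/* Illustration of the %*ph printk extension used above: the field width
 * gives the number of bytes to hex-dump from the pointer argument.
 * The buffer contents here are made up. */
#include <linux/printk.h>
#include <linux/types.h>

static void demo_print_link_status(void)
{
	u8 link_status[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

	pr_debug("link status %*ph\n", 6, link_status);
	/* logs: link status 11 22 33 44 55 66 */
}
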
 
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 8e2ee98e69d2..49cbb3795a10 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,9 +28,251 @@
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 #include "atom.h"
+#include <linux/backlight.h>
 
 extern int atom_debug;
 
+static u8
+radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
+{
+	u8 backlight_level;
+	u32 bios_2_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
+			   ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+	return backlight_level;
+}
+
+static void
+radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
+				       u8 backlight_level)
+{
+	u32 bios_2_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+	bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+			   ATOM_S2_CURRENT_BL_LEVEL_MASK);
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+	else
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
+
+u8
+atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return 0;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+void
+atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+	struct drm_encoder *encoder = &radeon_encoder->base;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dig *dig;
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+	    radeon_encoder->enc_priv) {
+		dig = radeon_encoder->enc_priv;
+		dig->backlight_level = level;
+		radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+			if (dig->backlight_level == 0) {
+				args.ucAction = ATOM_LCD_BLOFF;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			} else {
+				args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+				args.ucAction = ATOM_LCD_BLON;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->backlight_level == 0)
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+			else {
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static u8 radeon_atom_bl_level(struct backlight_device *bd)
+{
+	u8 level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	return level;
+}
+
+static int radeon_atom_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
+
+	return 0;
+}
+
+static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+static const struct backlight_ops radeon_atom_backlight_ops = {
+	.get_brightness = radeon_atom_backlight_get_brightness,
+	.update_status	= radeon_atom_backlight_update_status,
+};
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+				struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct radeon_backlight_privdata *pdata;
+	struct radeon_encoder_atom_dig *dig;
+	u8 backlight_level;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+				       pdata, &radeon_atom_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = radeon_encoder;
+
+	backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
+
+	dig = radeon_encoder->enc_priv;
+	dig->bl_dev = bd;
+
+	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("radeon atom DIG backlight initialized\n");
+
+	return;
+
+error:
+	kfree(pdata);
+	return;
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+	struct radeon_encoder_atom_dig *dig;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	dig = radeon_encoder->enc_priv;
+	bd = dig->bl_dev;
+	dig->bl_dev = NULL;
+
+	if (bd) {
+		struct radeon_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		kfree(pdata);
+
+		DRM_INFO("radeon atom LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_atom_backlight_init(struct radeon_encoder *encoder, struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
 /* evil but including atombios.h is much worse */
 bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
 				struct drm_display_mode *mode);
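
The backlight-level helpers above persist the current level in a BIOS scratch register with the usual read-modify-write pattern: clear the field through its mask, then OR in the new value shifted into place. A generic sketch of that pattern; the DEMO_* names are invented stand-ins for the ATOM_S2_CURRENT_BL_LEVEL_* definitions.

/* Generic masked read-modify-write, mirroring how the backlight level is
 * kept in the BIOS_2_SCRATCH register above. */
#include <linux/types.h>

#define DEMO_LEVEL_SHIFT	8
#define DEMO_LEVEL_MASK		(0xffUL << DEMO_LEVEL_SHIFT)

static u32 demo_encode_level(u32 scratch, u8 level)
{
	scratch &= ~DEMO_LEVEL_MASK;
	scratch |= ((u32)level << DEMO_LEVEL_SHIFT) & DEMO_LEVEL_MASK;
	return scratch;
}

static u8 demo_decode_level(u32 scratch)
{
	return (scratch & DEMO_LEVEL_MASK) >> DEMO_LEVEL_SHIFT;
}
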
@@ -209,6 +451,32 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
 
 }
 
+static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 8;
+
+	if (connector)
+		bpc = radeon_get_monitor_bpc(connector);
+
+	switch (bpc) {
+	case 0:
+		return PANEL_BPC_UNDEFINE;
+	case 6:
+		return PANEL_6BIT_PER_COLOR;
+	case 8:
+	default:
+		return PANEL_8BIT_PER_COLOR;
+	case 10:
+		return PANEL_10BIT_PER_COLOR;
+	case 12:
+		return PANEL_12BIT_PER_COLOR;
+	case 16:
+		return PANEL_16BIT_PER_COLOR;
+	}
+}
+
+
 union dvo_encoder_control {
 	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
 	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
@@ -406,7 +674,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		return ATOM_ENCODER_MODE_DP;
 
 	/* DVO is always DVO */
-	if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
 		return ATOM_ENCODER_MODE_DVO;
 
 	connector = radeon_get_connector_for_encoder(encoder);
@@ -535,7 +804,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 	int dp_clock = 0;
 	int dp_lane_count = 0;
 	int hpd_id = RADEON_HPD_NONE;
-	int bpc = 8;
 
 	if (connector) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -545,7 +813,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 		dp_clock = dig_connector->dp_clock;
 		dp_lane_count = dig_connector->dp_lane_count;
 		hpd_id = radeon_connector->hpd.hpd;
-		bpc = radeon_get_monitor_bpc(connector);
 	}
 
 	/* no dig encoder assigned */
@@ -612,37 +879,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 			else
 				args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
 				args.v3.ucLaneNum = dp_lane_count;
 			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 				args.v3.ucLaneNum = 8;
 			else
 				args.v3.ucLaneNum = 4;
 
-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
 			args.v3.acConfig.ucDigSel = dig->dig_encoder;
-			switch (bpc) {
-			case 0:
-				args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
-				break;
-			case 6:
-				args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-				break;
-			case 8:
-			default:
-				args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-				break;
-			case 10:
-				args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-				break;
-			case 12:
-				args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-				break;
-			case 16:
-				args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-				break;
-			}
+			args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
 			break;
 		case 4:
 			args.v4.ucAction = action;
@@ -652,41 +899,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 			else
 				args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
 				args.v4.ucLaneNum = dp_lane_count;
 			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 				args.v4.ucLaneNum = 8;
 			else
 				args.v4.ucLaneNum = 4;
 
-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) {
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
 				if (dp_clock == 270000)
 					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
 				else if (dp_clock == 540000)
 					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
 			}
 			args.v4.acConfig.ucDigSel = dig->dig_encoder;
-			switch (bpc) {
-			case 0:
-				args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
-				break;
-			case 6:
-				args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-				break;
-			case 8:
-			default:
-				args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-				break;
-			case 10:
-				args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-				break;
-			case 12:
-				args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-				break;
-			case 16:
-				args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-				break;
-			}
+			args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
 			if (hpd_id == RADEON_HPD_NONE)
 				args.v4.ucHPD_ID = 0;
 			else
@@ -799,8 +1026,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 				args.v1.asMode.ucLaneSet = lane_set;
 			} else {
 				if (is_dp)
-					args.v1.usPixelClock =
-						cpu_to_le16(dp_clock / 10);
+					args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
 				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 					args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 				else
@@ -857,8 +1083,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 				args.v2.asMode.ucLaneSet = lane_set;
 			} else {
 				if (is_dp)
-					args.v2.usPixelClock =
-						cpu_to_le16(dp_clock / 10);
+					args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
 				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 					args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 				else
@@ -900,8 +1125,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 				args.v3.asMode.ucLaneSet = lane_set;
 			} else {
 				if (is_dp)
-					args.v3.usPixelClock =
-						cpu_to_le16(dp_clock / 10);
+					args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
 				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 					args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 				else
@@ -960,8 +1184,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 				args.v4.asMode.ucLaneSet = lane_set;
 			} else {
 				if (is_dp)
-					args.v4.usPixelClock =
-						cpu_to_le16(dp_clock / 10);
+					args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
 				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 					args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 				else
@@ -1147,7 +1370,6 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 	int dp_lane_count = 0;
 	int connector_object_id = 0;
 	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
-	int bpc = 8;
 
 	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
 		connector = radeon_get_connector_for_encoder_init(encoder);
@@ -1163,7 +1385,6 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 		dp_lane_count = dig_connector->dp_lane_count;
 		connector_object_id =
 			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-		bpc = radeon_get_monitor_bpc(connector);
 	}
 
 	memset(&args, 0, sizeof(args));
@@ -1221,27 +1442,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
 				break;
 			}
-			switch (bpc) {
-			case 0:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
-				break;
-			case 6:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-				break;
-			case 8:
-			default:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-				break;
-			case 10:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-				break;
-			case 12:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-				break;
-			case 16:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-				break;
-			}
+			args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
 			break;
 		default:
 			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
@@ -2286,6 +2487,8 @@ static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
 void radeon_enc_destroy(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_atom_backlight_exit(radeon_encoder);
 	kfree(radeon_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
 	kfree(radeon_encoder);
@@ -2295,7 +2498,7 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
 	.destroy = radeon_enc_destroy,
 };
 
-struct radeon_encoder_atom_dac *
+static struct radeon_encoder_atom_dac *
 radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
 {
 	struct drm_device *dev = radeon_encoder->base.dev;
@@ -2309,7 +2512,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
 	return dac;
 }
 
-struct radeon_encoder_atom_dig *
+static struct radeon_encoder_atom_dig *
 radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
 {
 	int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
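
The args.v1 to args.v3/args.v4 changes in the DIG encoder setup above close a union-view pitfall: ucEncoderMode was written through the v3/v4 member but then tested through the v1 member, which presumably only worked because the field happens to sit at the same offset in every table revision. A standalone illustration of the pitfall, using invented layouts much simpler than the real ATOM tables:

/* Standalone illustration of the union-view pitfall fixed above. */
#include <stdio.h>
#include <string.h>

union demo_args {
	struct { unsigned char ucAction, ucEncoderMode; } v1;
	struct { unsigned char ucAction, ucPanelMode, ucEncoderMode; } v3;
};

int main(void)
{
	union demo_args args;

	memset(&args, 0, sizeof(args));
	args.v3.ucEncoderMode = 5;	/* written through the v3 view */

	/* args.v1.ucEncoderMode inspects a different byte (offset 1, not 2),
	 * so tests must read back through the view that was written. */
	printf("v1 sees %u, v3 sees %u\n",
	       args.v1.ucEncoderMode, args.v3.ucEncoderMode);
	return 0;
}
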
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c548dd75ca8b..a1f49c5fd74b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -37,6 +37,16 @@
 #define EVERGREEN_PFP_UCODE_SIZE 1120
 #define EVERGREEN_PM4_UCODE_SIZE 1376
 
+static const u32 crtc_offsets[6] =
+{
+	EVERGREEN_CRTC0_REGISTER_OFFSET,
+	EVERGREEN_CRTC1_REGISTER_OFFSET,
+	EVERGREEN_CRTC2_REGISTER_OFFSET,
+	EVERGREEN_CRTC3_REGISTER_OFFSET,
+	EVERGREEN_CRTC4_REGISTER_OFFSET,
+	EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
@@ -105,17 +115,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
  */
 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
 	int i;
 
-	if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
 		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
+			if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
 				break;
 			udelay(1);
 		}
 		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
+			if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
 				break;
 			udelay(1);
 		}
@@ -310,6 +322,64 @@ void sumo_pm_init_profile(struct radeon_device *rdev)
 }
 
 /**
+ * btc_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (BTC, cayman).
+ * Used for profile mode only.
+ */
+void btc_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+	/* starting with BTC, there is one state that is used for both
+	 * MH and SH.  The difference is that we always use the high clock
+	 * index for mclk.
+	 */
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+	else
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+}
+
+/**
  * evergreen_pm_misc - set additional pm hw parameters callback.
  *
  * @rdev: radeon_device pointer
@@ -1105,7 +1175,7 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
 	}
 }
 
-int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r;
@@ -1164,7 +1234,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
 
@@ -1189,7 +1259,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
 {
 	evergreen_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
@@ -1197,7 +1267,7 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
 }
 
 
-void evergreen_agp_enable(struct radeon_device *rdev)
+static void evergreen_agp_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 
@@ -1225,116 +1295,103 @@ void evergreen_agp_enable(struct radeon_device *rdev)
 
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+
 	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
 	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
 
-	/* Stop all video */
+	/* disable VGA render */
 	WREG32(VGA_RENDER_CONTROL, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
-	}
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
-	}
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+					radeon_wait_for_vblank(rdev, i);
+					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				}
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+					radeon_wait_for_vblank(rdev, i);
+					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+				}
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
 	}
 
-	WREG32(D1VGA_CONTROL, 0);
-	WREG32(D2VGA_CONTROL, 0);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
-		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
-		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+	radeon_mc_wait_for_idle(rdev);
+
+	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+		/* Block CPU access */
+		WREG32(BIF_FB_EN, 0);
+		/* blackout the MC */
+		blackout &= ~BLACKOUT_MODE_MASK;
+		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
 	}
 }
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
+	u32 tmp, frame_count;
+	int i, j;
 
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 		       (u32)rdev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
 		       (u32)rdev->mc.vram_start);
 	}
-
 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
-	/* Unlock host access */
+
+	/* unblackout the MC */
+	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	tmp &= ~BLACKOUT_MODE_MASK;
+	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+	/* allow CPU access */
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
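+		/* re-enable scanout on the crtcs that were active before the MC was stopped */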
+		if (save->crtc_enabled[i]) {
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
+				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	/* Unlock vga access */
 	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
 	mdelay(1);
 	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
@@ -1553,7 +1610,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 	return 0;
 }
 
-int evergreen_cp_resume(struct radeon_device *rdev)
+static int evergreen_cp_resume(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
@@ -2329,22 +2386,10 @@ int evergreen_asic_reset(struct radeon_device *rdev)
 
 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
 {
-	switch (crtc) {
-	case 0:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
-	case 1:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
-	case 2:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
-	case 3:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
-	case 4:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
-	case 5:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
-	default:
+	if (crtc >= rdev->num_crtc)
 		return 0;
-	}
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
 void evergreen_disable_interrupt_state(struct radeon_device *rdev)
@@ -2537,10 +2582,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
 		afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
 	}
-	if (rdev->irq.gui_idle) {
-		DRM_DEBUG("gui idle\n");
-		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
-	}
 
 	if (rdev->family >= CHIP_CAYMAN) {
 		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
@@ -2722,7 +2763,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 	}
 }
 
-void evergreen_irq_disable(struct radeon_device *rdev)
+static void evergreen_irq_disable(struct radeon_device *rdev)
 {
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
@@ -3075,7 +3116,6 @@ restart_ih:
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
-			wake_up(&rdev->irq.idle_queue);
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 101acd618f67..573ed1bc6cf7 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -846,6 +846,16 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 
+	if (!mipmap) {
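+		/* a missing mipmap BO is only valid when the texture has no mip levels */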
+		if (llevel) {
+			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
+				 __func__, __LINE__);
+			return -EINVAL;
+		} else {
+			return 0; /* everything's ok */
+		}
+	}
+
 	/* check mipmap size */
 	for (i = 1; i <= llevel; i++) {
 		unsigned w, h, d;
@@ -995,7 +1005,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
  * Assume that chunk_ib_index is properly set. Will return -EINVAL
  * if packet is bigger than remaining ib size, or if packet is unknown.
  **/
-int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
+static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt,
 			      unsigned idx)
 {
@@ -1081,6 +1091,27 @@ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
 }
 
 /**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
+ * @p:		structure holding the parser context.
+ *
+ * Check if the next packet is a relocation packet3.
+ **/
+static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet p3reloc;
+	int r;
+
+	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return false;
+	}
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		return false;
+	}
+	return true;
+}
+
+/**
  * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
  * @parser:		parser structure holding parsing context.
  *
@@ -2330,7 +2361,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		for (i = 0; i < (pkt->count / 8); i++) {
 			struct radeon_bo *texture, *mipmap;
 			u32 toffset, moffset;
-			u32 size, offset;
+			u32 size, offset, mip_address, tex_dim;
 
 			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
 			case SQ_TEX_VTX_VALID_TEXTURE:
@@ -2359,14 +2390,28 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 				}
 				texture = reloc->robj;
 				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+
 				/* tex mip base */
-				r = evergreen_cs_packet_next_reloc(p, &reloc);
-				if (r) {
-					DRM_ERROR("bad SET_RESOURCE (tex)\n");
-					return -EINVAL;
+				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
+				mip_address = ib[idx+1+(i*8)+3];
+
+				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
+				    !mip_address &&
+				    !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
+					 * It should be 0 if FMASK is disabled. */
+					moffset = 0;
+					mipmap = NULL;
+				} else {
+					r = evergreen_cs_packet_next_reloc(p, &reloc);
+					if (r) {
+						DRM_ERROR("bad SET_RESOURCE (tex)\n");
+						return -EINVAL;
+					}
+					moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+					mipmap = reloc->robj;
 				}
-				moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-				mipmap = reloc->robj;
+
 				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
 				if (r)
 					return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 8beac1065025..034f4c22e5db 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -218,6 +218,8 @@
 #define EVERGREEN_CRTC_CONTROL                          0x6e70
 #       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
 #       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x6e74
+#       define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
 #define EVERGREEN_CRTC_STATUS                           0x6e8c
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79347855d9bf..df542f1a5dfb 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -87,6 +87,10 @@
 
 #define	CONFIG_MEMSIZE					0x5428
 
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
 #define	CP_COHER_BASE					0x85F8
 #define	CP_STALLED_STAT1			0x8674
 #define	CP_STALLED_STAT2			0x8678
@@ -430,6 +434,9 @@
 #define		NOOFCHAN_MASK					0x00003000
 #define MC_SHARED_CHREMAP					0x2008
 
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define		BLACKOUT_MODE_MASK			0x00000007
+
 #define	MC_ARB_RAMCFG					0x2760
 #define		NOOFBANK_SHIFT					0
 #define		NOOFBANK_MASK					0x00000003
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 174462519f15..8bcb554ea0c5 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -726,7 +726,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
 	WREG32(VM_INVALIDATE_REQUEST, 1);
 }
 
-int cayman_pcie_gart_enable(struct radeon_device *rdev)
+static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 {
 	int i, r;
 
@@ -782,7 +782,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	       (u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);
-	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
 	cayman_pcie_gart_tlb_flush(rdev);
@@ -793,7 +793,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void cayman_pcie_gart_disable(struct radeon_device *rdev)
+static void cayman_pcie_gart_disable(struct radeon_device *rdev)
 {
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
@@ -813,7 +813,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void cayman_pcie_gart_fini(struct radeon_device *rdev)
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
 {
 	cayman_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
@@ -879,12 +879,13 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 #endif
 			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
-	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+	radeon_ring_write(ring, ib->length_dw |
+			  (ib->vm ? (ib->vm->id << 24) : 0));
 
 	/* flush read cache over gart for this vmid */
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(ring, ib->vm_id);
+	radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
 	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
 	radeon_ring_write(ring, 0xFFFFFFFF);
@@ -1004,7 +1005,7 @@ static void cayman_cp_fini(struct radeon_device *rdev)
 	radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
-int cayman_cp_resume(struct radeon_device *rdev)
+static int cayman_cp_resume(struct radeon_device *rdev)
 {
 	static const int ridx[] = {
 		RADEON_RING_TYPE_GFX_INDEX,
@@ -1496,53 +1497,16 @@ void cayman_vm_fini(struct radeon_device *rdev)
 {
 }
 
-int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
-{
-	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-7 are the VM contexts0-7 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
-	return 0;
-}
-
-void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-7 are the VM contexts0-7 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
-
-void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	if (vm->id == -1)
-		return;
-
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-7 are the VM contexts0-7 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
-
-#define R600_PTE_VALID     (1 << 0)
+#define R600_ENTRY_VALID   (1 << 0)
 #define R600_PTE_SYSTEM    (1 << 1)
 #define R600_PTE_SNOOPED   (1 << 2)
 #define R600_PTE_READABLE  (1 << 5)
 #define R600_PTE_WRITEABLE (1 << 6)
 
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
-			      struct radeon_vm *vm,
-			      uint32_t flags)
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
 {
 	uint32_t r600_flags = 0;
-
-	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
 	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
 	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
 	if (flags & RADEON_VM_PAGE_SYSTEM) {
@@ -1552,12 +1516,76 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
 	return r600_flags;
 }
 
-void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
-			unsigned pfn, uint64_t addr, uint32_t flags)
+/**
+ * cayman_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman-si).
+ */
+void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags)
+{
+	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	int i;
+
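+	/* ME_WRITE: destination page entry address followed by the new entries */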
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
+	radeon_ring_write(ring, pe);
+	radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+	for (i = 0; i < count; ++i) {
+		uint64_t value = 0;
+		if (flags & RADEON_VM_PAGE_SYSTEM) {
+			value = radeon_vm_map_gart(rdev, addr);
+			value &= 0xFFFFFFFFFFFFF000ULL;
+			addr += incr;
+
+		} else if (flags & RADEON_VM_PAGE_VALID) {
+			value = addr;
+			addr += incr;
+		}
+
+		value |= r600_flags;
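+		/* each page table entry is written as two dwords: low then high */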
+		radeon_ring_write(ring, value);
+		radeon_ring_write(ring, upper_32_bits(value));
+	}
+}
+
+/**
+ * cayman_vm_flush - vm flush using the CP
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using the CP (cayman-si).
+ */
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
-	void __iomem *ptr = (void *)vm->pt;
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
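+	/* set the address range and page directory base for this VM id */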
+	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
+	radeon_ring_write(ring, vm->last_pfn);
 
-	addr = addr & 0xFFFFFFFFFFFFF000ULL;
-	addr |= flags;
-	writeq(addr, ptr + (pfn * 8));
+	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+	radeon_ring_write(ring, 0x1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
+	radeon_ring_write(ring, 1 << vm->id);
 }
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 870db340d377..2423d1b5d385 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -585,6 +585,7 @@
 #define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
 #define	PACKET3_SET_RESOURCE_INDIRECT			0x74
 #define	PACKET3_SET_APPEND_CNT			        0x75
+#define	PACKET3_ME_WRITE				0x7A
 
 #endif
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 3183a815f71c..376884f1bcd2 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -79,10 +79,12 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  */
 void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
 	int i;
 
-	if (radeon_crtc->crtc_id == 0) {
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (crtc == 0) {
 		if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
 			for (i = 0; i < rdev->usec_timeout; i++) {
 				if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
@@ -697,9 +699,6 @@ int r100_irq_set(struct radeon_device *rdev)
 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 		tmp |= RADEON_SW_INT_ENABLE;
 	}
-	if (rdev->irq.gui_idle) {
-		tmp |= RADEON_GUI_IDLE_MASK;
-	}
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		tmp |= RADEON_CRTC_VBLANK_MASK;
@@ -736,12 +735,6 @@ static uint32_t r100_irq_ack(struct radeon_device *rdev)
 		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
 		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
-	/* the interrupt works, but the status bit is permanently asserted */
-	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
-		if (!rdev->irq.gui_idle_acked)
-			irq_mask |= RADEON_GUI_IDLE_STAT;
-	}
-
 	if (irqs) {
 		WREG32(RADEON_GEN_INT_STATUS, irqs);
 	}
@@ -753,9 +746,6 @@ int r100_irq_process(struct radeon_device *rdev)
 	uint32_t status, msi_rearm;
 	bool queue_hotplug = false;
 
-	/* reset gui idle ack.  the status bit is broken */
-	rdev->irq.gui_idle_acked = false;
-
 	status = r100_irq_ack(rdev);
 	if (!status) {
 		return IRQ_NONE;
@@ -768,11 +758,6 @@ int r100_irq_process(struct radeon_device *rdev)
 		if (status & RADEON_SW_INT_TEST) {
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		}
-		/* gui idle interrupt */
-		if (status & RADEON_GUI_IDLE_STAT) {
-			rdev->irq.gui_idle_acked = true;
-			wake_up(&rdev->irq.idle_queue);
-		}
 		/* Vertical blank interrupts */
 		if (status & RADEON_CRTC_VBLANK_STAT) {
 			if (rdev->irq.crtc_vblank_int[0]) {
@@ -802,8 +787,6 @@ int r100_irq_process(struct radeon_device *rdev)
 		}
 		status = r100_irq_ack(rdev);
 	}
-	/* reset gui idle ack.  the status bit is broken */
-	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	if (rdev->msi_enabled) {
@@ -2529,7 +2512,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 /*
  * Global GPU functions
  */
-void r100_errata(struct radeon_device *rdev)
+static void r100_errata(struct radeon_device *rdev)
 {
 	rdev->pll_errata = 0;
 
@@ -2544,51 +2527,7 @@ void r100_errata(struct radeon_device *rdev)
 	}
 }
 
-/* Wait for vertical sync on primary CRTC */
-void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
-{
-	uint32_t crtc_gen_cntl, tmp;
-	int i;
-
-	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
-	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
-	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
-		return;
-	}
-	/* Clear the CRTC_VBLANK_SAVE bit */
-	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(RADEON_CRTC_STATUS);
-		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
-			return;
-		}
-		DRM_UDELAY(1);
-	}
-}
-
-/* Wait for vertical sync on secondary CRTC */
-void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
-{
-	uint32_t crtc2_gen_cntl, tmp;
-	int i;
-
-	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
-	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
-		return;
-
-	/* Clear the CRTC_VBLANK_SAVE bit */
-	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(RADEON_CRTC2_STATUS);
-		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
-			return;
-		}
-		DRM_UDELAY(1);
-	}
-}
-
-int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
 {
 	unsigned i;
 	uint32_t tmp;
@@ -2949,7 +2888,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
 	WREG32(RADEON_CONFIG_CNTL, temp);
 }
 
-void r100_mc_init(struct radeon_device *rdev)
+static void r100_mc_init(struct radeon_device *rdev)
 {
 	u64 base;
 
@@ -3021,7 +2960,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 	r100_pll_errata_after_data(rdev);
 }
 
-void r100_set_safe_registers(struct radeon_device *rdev)
+static void r100_set_safe_registers(struct radeon_device *rdev)
 {
 	if (ASIC_IS_RN50(rdev)) {
 		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
@@ -3816,9 +3755,10 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
 	if (r) {
-		return r;
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		goto free_scratch;
 	}
 	ib.ptr[0] = PACKET0(scratch, 0);
 	ib.ptr[1] = 0xDEADBEEF;
@@ -3831,13 +3771,13 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.length_dw = 8;
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
-		radeon_scratch_free(rdev, scratch);
-		radeon_ib_free(rdev, &ib);
-		return r;
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		goto free_ib;
 	}
 	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
-		return r;
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto free_ib;
 	}
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -3853,8 +3793,10 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	radeon_scratch_free(rdev, scratch);
+free_ib:
 	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
 	return r;
 }
 
@@ -3963,7 +3905,7 @@ static void r100_mc_program(struct radeon_device *rdev)
 	r100_mc_resume(rdev, &save);
 }
 
-void r100_clock_startup(struct radeon_device *rdev)
+static void r100_clock_startup(struct radeon_device *rdev)
 {
 	u32 tmp;
 
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1e10df214271..d0ba6023a1f8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -296,7 +296,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
 	radeon_ring_unlock_commit(rdev, ring);
 }
 
-void r300_errata(struct radeon_device *rdev)
+static void r300_errata(struct radeon_device *rdev)
 {
 	rdev->pll_errata = 0;
 
@@ -322,7 +322,7 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
 	return -1;
 }
 
-void r300_gpu_init(struct radeon_device *rdev)
+static void r300_gpu_init(struct radeon_device *rdev)
 {
 	uint32_t gb_tile_config, tmp;
 
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 90703d539e04..f795a4e092cb 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,7 +119,7 @@ static void r520_vram_get_type(struct radeon_device *rdev)
 		rdev->mc.vram_width *= 2;
 }
 
-void r520_mc_init(struct radeon_device *rdev)
+static void r520_mc_init(struct radeon_device *rdev)
 {
 
 	r520_vram_get_type(rdev);
@@ -131,7 +131,7 @@ void r520_mc_init(struct radeon_device *rdev)
 	radeon_update_bandwidth_info(rdev);
 }
 
-void r520_mc_program(struct radeon_device *rdev)
+static void r520_mc_program(struct radeon_device *rdev)
 {
 	struct rv515_mc_save save;
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9f2cafd10f4a..70c800ff6190 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -98,7 +98,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
 /* r600,rv610,rv630,rv620,rv635,rv670 */
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
-void r600_gpu_init(struct radeon_device *rdev);
+static void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
 void r600_irq_disable(struct radeon_device *rdev);
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
@@ -881,7 +881,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
 	return radeon_gart_table_vram_alloc(rdev);
 }
 
-int r600_pcie_gart_enable(struct radeon_device *rdev)
+static int r600_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r, i;
@@ -938,7 +938,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void r600_pcie_gart_disable(struct radeon_device *rdev)
+static void r600_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int i;
@@ -971,14 +971,14 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void r600_pcie_gart_fini(struct radeon_device *rdev)
+static void r600_pcie_gart_fini(struct radeon_device *rdev)
 {
 	radeon_gart_fini(rdev);
 	r600_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
 }
 
-void r600_agp_enable(struct radeon_device *rdev)
+static void r600_agp_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int i;
@@ -1158,7 +1158,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
 	}
 }
 
-int r600_mc_init(struct radeon_device *rdev)
+static int r600_mc_init(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int chansize, numchan;
@@ -1258,7 +1258,7 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
  * reset, it's up to the caller to determine if the GPU needs one. We
  * might add a helper function to check that.
  */
-int r600_gpu_soft_reset(struct radeon_device *rdev)
+static int r600_gpu_soft_reset(struct radeon_device *rdev)
 {
 	struct rv515_mc_save save;
 	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
@@ -1433,7 +1433,7 @@ int r600_count_pipe_bits(uint32_t val)
 	return ret;
 }
 
-void r600_gpu_init(struct radeon_device *rdev)
+static void r600_gpu_init(struct radeon_device *rdev)
 {
 	u32 tiling_config;
 	u32 ramcfg;
@@ -2347,7 +2347,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 	/* FIXME: implement */
 }
 
-int r600_startup(struct radeon_device *rdev)
+static int r600_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
@@ -2635,10 +2635,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, ring->idx, &ib, 256);
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
 	if (r) {
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
-		return r;
+		goto free_scratch;
 	}
 	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -2646,15 +2646,13 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.length_dw = 3;
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
-		radeon_scratch_free(rdev, scratch);
-		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
-		return r;
+		goto free_ib;
 	}
 	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
-		return r;
+		goto free_ib;
 	}
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -2669,8 +2667,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	radeon_scratch_free(rdev, scratch);
+free_ib:
 	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
 	return r;
 }
 
@@ -3088,10 +3088,6 @@ int r600_irq_set(struct radeon_device *rdev)
 		DRM_DEBUG("r600_irq_set: hdmi 0\n");
 		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
 	}
-	if (rdev->irq.gui_idle) {
-		DRM_DEBUG("gui idle\n");
-		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
-	}
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
@@ -3475,7 +3471,6 @@ restart_ih:
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
-			wake_up(&rdev->irq.idle_queue);
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 26ace5623dc7..77da1f9c0b8e 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -488,31 +488,36 @@ set_default_state(drm_radeon_private_t *dev_priv)
 	ADVANCE_RING();
 }
 
-static uint32_t i2f(uint32_t input)
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS  23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24.  Above that, we round towards zero
+ * as the fractional bits will not fit in a float.  (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
 {
-	u32 result, i, exponent, fraction;
-
-	if ((input & 0x3fff) == 0)
-		result = 0; /* 0 is a special case */
-	else {
-		exponent = 140; /* exponent biased by 127; */
-		fraction = (input & 0x3fff) << 10; /* cheat and only
-						      handle numbers below 2^^15 */
-		for (i = 0; i < 14; i++) {
-			if (fraction & 0x800000)
-				break;
-			else {
-				fraction = fraction << 1; /* keep
-							     shifting left until top bit = 1 */
-				exponent = exponent - 1;
-			}
-		}
-		result = exponent << 23 | (fraction & 0x7fffff); /* mask
-								    off top bit; assumed 1 */
-	}
-	return result;
-}
+	uint32_t msb, exponent, fraction;
+
+	/* Zero is special */
+	if (!x) return 0;
+
+	/* Get location of the most significant bit */
+	msb = __fls(x);
 
+	/*
+	 * Use a rotate instead of a shift because that works both leftwards
+	 * and rightwards due to the mod(32) behaviour.  This means we don't
+	 * need to check to see if we are above 2^24 or not.
+	 */
+	fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+	exponent = (127 + msb) << I2F_FRAC_BITS;
+
+	return fraction + exponent;
+}
 
 static int r600_nomm_get_vb(struct drm_device *dev)
 {
@@ -631,20 +636,20 @@ r600_blit_copy(struct drm_device *dev,
 				vb = r600_nomm_get_vb_ptr(dev);
 			}
 
-			vb[0] = i2f(dst_x);
+			vb[0] = int2float(dst_x);
 			vb[1] = 0;
-			vb[2] = i2f(src_x);
+			vb[2] = int2float(src_x);
 			vb[3] = 0;
 
-			vb[4] = i2f(dst_x);
-			vb[5] = i2f(h);
-			vb[6] = i2f(src_x);
-			vb[7] = i2f(h);
+			vb[4] = int2float(dst_x);
+			vb[5] = int2float(h);
+			vb[6] = int2float(src_x);
+			vb[7] = int2float(h);
 
-			vb[8] = i2f(dst_x + cur_size);
-			vb[9] = i2f(h);
-			vb[10] = i2f(src_x + cur_size);
-			vb[11] = i2f(h);
+			vb[8] = int2float(dst_x + cur_size);
+			vb[9] = int2float(h);
+			vb[10] = int2float(src_x + cur_size);
+			vb[11] = int2float(h);
 
 			/* src */
 			set_tex_resource(dev_priv, FMT_8,
@@ -720,20 +725,20 @@ r600_blit_copy(struct drm_device *dev,
 				vb = r600_nomm_get_vb_ptr(dev);
 			}
 
-			vb[0] = i2f(dst_x / 4);
+			vb[0] = int2float(dst_x / 4);
 			vb[1] = 0;
-			vb[2] = i2f(src_x / 4);
+			vb[2] = int2float(src_x / 4);
 			vb[3] = 0;
 
-			vb[4] = i2f(dst_x / 4);
-			vb[5] = i2f(h);
-			vb[6] = i2f(src_x / 4);
-			vb[7] = i2f(h);
+			vb[4] = int2float(dst_x / 4);
+			vb[5] = int2float(h);
+			vb[6] = int2float(src_x / 4);
+			vb[7] = int2float(h);
 
-			vb[8] = i2f((dst_x + cur_size) / 4);
-			vb[9] = i2f(h);
-			vb[10] = i2f((src_x + cur_size) / 4);
-			vb[11] = i2f(h);
+			vb[8] = int2float((dst_x + cur_size) / 4);
+			vb[9] = int2float(h);
+			vb[10] = int2float((src_x + cur_size) / 4);
+			vb[11] = int2float(h);
 
 			/* src */
 			set_tex_resource(dev_priv, FMT_8_8_8_8,
@@ -803,20 +808,20 @@ r600_blit_swap(struct drm_device *dev,
 	dx2 = dx + w;
 	dy2 = dy + h;
 
-	vb[0] = i2f(dx);
-	vb[1] = i2f(dy);
-	vb[2] = i2f(sx);
-	vb[3] = i2f(sy);
+	vb[0] = int2float(dx);
+	vb[1] = int2float(dy);
+	vb[2] = int2float(sx);
+	vb[3] = int2float(sy);
 
-	vb[4] = i2f(dx);
-	vb[5] = i2f(dy2);
-	vb[6] = i2f(sx);
-	vb[7] = i2f(sy2);
+	vb[4] = int2float(dx);
+	vb[5] = int2float(dy2);
+	vb[6] = int2float(sx);
+	vb[7] = int2float(sy2);
 
-	vb[8] = i2f(dx2);
-	vb[9] = i2f(dy2);
-	vb[10] = i2f(sx2);
-	vb[11] = i2f(sy2);
+	vb[8] = int2float(dx2);
+	vb[9] = int2float(dy2);
+	vb[10] = int2float(sx2);
+	vb[11] = int2float(sy2);
 
 	switch(cpp) {
 	case 4:
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index aec8487662c4..e082dca6feee 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -454,46 +454,6 @@ set_default_state(struct radeon_device *rdev)
 	radeon_ring_write(ring, sq_stack_resource_mgmt_2);
 }
 
-#define I2F_MAX_BITS 15
-#define I2F_MAX_INPUT  ((1 << I2F_MAX_BITS) - 1)
-#define I2F_SHIFT (24 - I2F_MAX_BITS)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Conversion is not universal and only works for the range from 0
- * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
- * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
- * I2F_MAX_BITS can be increased, but that will add to the loop iterations
- * and slow us down. Conversion is done by shifting the input and counting
- * down until the first 1 reaches bit position 23. The resulting counter
- * and the shifted input are, respectively, the exponent and the fraction.
- * The sign is always zero.
- */
-static uint32_t i2f(uint32_t input)
-{
-	u32 result, i, exponent, fraction;
-
-	WARN_ON_ONCE(input > I2F_MAX_INPUT);
-
-	if ((input & I2F_MAX_INPUT) == 0)
-		result = 0;
-	else {
-		exponent = 126 + I2F_MAX_BITS;
-		fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
-
-		for (i = 0; i < I2F_MAX_BITS; i++) {
-			if (fraction & 0x800000)
-				break;
-			else {
-				fraction = fraction << 1;
-				exponent = exponent - 1;
-			}
-		}
-		result = exponent << 23 | (fraction & 0x7fffff);
-	}
-	return result;
-}
-
 int r600_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
@@ -765,14 +725,14 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 		vb_cpu_addr[3] = 0;
 
 		vb_cpu_addr[4] = 0;
-		vb_cpu_addr[5] = i2f(h);
+		vb_cpu_addr[5] = int2float(h);
 		vb_cpu_addr[6] = 0;
-		vb_cpu_addr[7] = i2f(h);
+		vb_cpu_addr[7] = int2float(h);
 
-		vb_cpu_addr[8] = i2f(w);
-		vb_cpu_addr[9] = i2f(h);
-		vb_cpu_addr[10] = i2f(w);
-		vb_cpu_addr[11] = i2f(h);
+		vb_cpu_addr[8] = int2float(w);
+		vb_cpu_addr[9] = int2float(h);
+		vb_cpu_addr[10] = int2float(w);
+		vb_cpu_addr[11] = int2float(h);
 
 		rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
 							    w, h, w, src_gpu_addr, size_in_bytes);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index f437d36dd98c..2f3ce7a75976 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,4 +35,5 @@ extern const u32 r6xx_default_state[];
 extern const u32 r6xx_ps_size, r6xx_vs_size;
 extern const u32 r6xx_default_size, r7xx_default_size;
 
+__pure uint32_t int2float(uint32_t x);
 #endif
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cb92646a5e55..211c40252fe0 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -847,7 +847,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
  * Assume that chunk_ib_index is properly set. Will return -EINVAL
  * if packet is bigger than remaining ib size, or if packet is unknown.
  **/
-int r600_cs_packet_parse(struct radeon_cs_parser *p,
+static int r600_cs_packet_parse(struct radeon_cs_parser *p,
 			struct radeon_cs_packet *pkt,
 			unsigned idx)
 {
@@ -2180,7 +2180,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_STRMOUT_BASE_UPDATE:
-		if (p->family < CHIP_RV770) {
+		/* RS780 and RS880 also need this */
+		if (p->family < CHIP_RS780) {
 			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
 			return -EINVAL;
 		}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 23be9319c729..ff80efe9cb7d 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -53,7 +53,7 @@ enum r600_hdmi_iec_status_bits {
 	AUDIO_STATUS_LEVEL        = 0x80
 };
 
-struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
     /*	     32kHz	  44.1kHz	48kHz    */
     /* Clock      N     CTS      N     CTS      N     CTS */
     {  25174,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 59a15315ae9f..b04c06444d8b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -123,6 +123,7 @@ extern int radeon_lockup_timeout;
 #define CAYMAN_RING_TYPE_CP2_INDEX		2
 
 /* hardcode those limit for now */
+#define RADEON_VA_IB_OFFSET			(1 << 20)
 #define RADEON_VA_RESERVED_SIZE			(8 << 20)
 #define RADEON_IB_VM_MAX_SIZE			(64 << 10)
 
@@ -253,6 +254,22 @@ static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
 	}
 }
 
+static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
+					   struct radeon_fence *b)
+{
+	if (!a) {
+		return false;
+	}
+
+	if (!b) {
+		return true;
+	}
+
+	BUG_ON(a->ring != b->ring);
+
+	return a->seq < b->seq;
+}
+
 /*
  * Tiling registers
  */
@@ -275,18 +292,20 @@ struct radeon_mman {
 
 /* bo virtual address in a specific vm */
 struct radeon_bo_va {
-	/* bo list is protected by bo being reserved */
+	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	/* vm list is protected by vm mutex */
-	struct list_head		vm_list;
-	/* constant after initialization */
-	struct radeon_vm		*vm;
-	struct radeon_bo		*bo;
 	uint64_t			soffset;
 	uint64_t			eoffset;
 	uint32_t			flags;
-	struct radeon_fence		*fence;
 	bool				valid;
+	unsigned			ref_count;
+
+	/* protected by vm mutex */
+	struct list_head		vm_list;
+
+	/* constant after initialization */
+	struct radeon_vm		*vm;
+	struct radeon_bo		*bo;
 };
 
 struct radeon_bo {
@@ -566,9 +585,6 @@ struct radeon_irq {
 	atomic_t			pflip[RADEON_MAX_CRTCS];
 	wait_queue_head_t		vblank_queue;
 	bool				hpd[RADEON_MAX_HPD_PINS];
-	bool				gui_idle;
-	bool				gui_idle_acked;
-	wait_queue_head_t		idle_queue;
 	bool				afmt[RADEON_MAX_AFMT_BLOCKS];
 	union radeon_irq_stat_regs	stat_regs;
 };
@@ -583,7 +599,6 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
 void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
 void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
 void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
-int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev);
 
 /*
  * CP & rings.
@@ -596,7 +611,7 @@ struct radeon_ib {
 	uint32_t			*ptr;
 	int				ring;
 	struct radeon_fence		*fence;
-	unsigned			vm_id;
+	struct radeon_vm		*vm;
 	bool				is_const_ib;
 	struct radeon_fence		*sync_to[RADEON_NUM_RINGS];
 	struct radeon_semaphore		*semaphore;
@@ -632,41 +647,38 @@ struct radeon_ring {
 /*
  * VM
  */
+
+/* maximum number of VMIDs */
+#define RADEON_NUM_VM	16
+
+/* defines number of bits in page table versus page directory,
+ * a page is 4KB so we have 12 bits offset, 9 bits in the page
+ * table and the remaining 19 bits are in the page directory */
+#define RADEON_VM_BLOCK_SIZE   9
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
+
 struct radeon_vm {
 	struct list_head		list;
 	struct list_head		va;
-	int				id;
+	unsigned			id;
 	unsigned			last_pfn;
-	u64				pt_gpu_addr;
-	u64				*pt;
+	u64				pd_gpu_addr;
 	struct radeon_sa_bo		*sa_bo;
 	struct mutex			mutex;
 	/* last fence for cs using this vm */
 	struct radeon_fence		*fence;
-};
-
-struct radeon_vm_funcs {
-	int (*init)(struct radeon_device *rdev);
-	void (*fini)(struct radeon_device *rdev);
-	/* cs mutex must be lock for schedule_ib */
-	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
-	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
-	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
-	uint32_t (*page_flags)(struct radeon_device *rdev,
-			       struct radeon_vm *vm,
-			       uint32_t flags);
-	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
-			unsigned pfn, uint64_t addr, uint32_t flags);
+	/* last flush or NULL if we still need to flush */
+	struct radeon_fence		*last_flush;
 };
 
 struct radeon_vm_manager {
 	struct mutex			lock;
 	struct list_head		lru_vm;
-	uint32_t			use_bitmap;
+	struct radeon_fence		*active[RADEON_NUM_VM];
 	struct radeon_sa_manager	sa_manager;
 	uint32_t			max_pfn;
-	/* fields constant after init */
-	const struct radeon_vm_funcs	*funcs;
 	/* number of VMIDs */
 	unsigned			nvm;
 	/* vram base address for page table entry  */
@@ -738,7 +750,8 @@ struct si_rlc {
 };
 
 int radeon_ib_get(struct radeon_device *rdev, int ring,
-		  struct radeon_ib *ib, unsigned size);
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		       struct radeon_ib *const_ib);
@@ -1131,6 +1144,15 @@ struct radeon_asic {
 		void (*tlb_flush)(struct radeon_device *rdev);
 		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
 	} gart;
+	struct {
+		int (*init)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+
+		u32 pt_ring_index;
+		void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+				 uint64_t addr, unsigned count,
+				 uint32_t incr, uint32_t flags);
+	} vm;
 	/* ring specific callbacks */
 	struct {
 		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -1143,6 +1165,7 @@ struct radeon_asic {
 		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
 		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
 		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+		void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 	} ring[RADEON_NUM_RINGS];
 	/* irqs */
 	struct {
@@ -1157,6 +1180,10 @@ struct radeon_asic {
 		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
 		/* wait for vblank */
 		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
+		/* set backlight level */
+		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
+		/* get backlight level */
+		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
 	} display;
 	/* copy functions for bo handling */
 	struct {
@@ -1428,6 +1455,56 @@ struct r600_vram_scratch {
 	u64				gpu_addr;
 };
 
+/*
+ * ACPI
+ */
+struct radeon_atif_notification_cfg {
+	bool enabled;
+	int command_code;
+};
+
+struct radeon_atif_notifications {
+	bool display_switch;
+	bool expansion_mode_change;
+	bool thermal_state;
+	bool forced_power_state;
+	bool system_power_state;
+	bool display_conf_change;
+	bool px_gfx_switch;
+	bool brightness_change;
+	bool dgpu_display_event;
+};
+
+struct radeon_atif_functions {
+	bool system_params;
+	bool sbios_requests;
+	bool select_active_disp;
+	bool lid_state;
+	bool get_tv_standard;
+	bool set_tv_standard;
+	bool get_panel_expansion_mode;
+	bool set_panel_expansion_mode;
+	bool temperature_change;
+	bool graphics_device_types;
+};
+
+struct radeon_atif {
+	struct radeon_atif_notifications notifications;
+	struct radeon_atif_functions functions;
+	struct radeon_atif_notification_cfg notification_cfg;
+	struct radeon_encoder *encoder_for_bl;
+};
+
+struct radeon_atcs_functions {
+	bool get_ext_state;
+	bool pcie_perf_req;
+	bool pcie_dev_rdy;
+	bool pcie_bus_width;
+};
+
+struct radeon_atcs {
+	struct radeon_atcs_functions functions;
+};
 
 /*
  * Core structure, functions and helpers.
@@ -1520,6 +1597,9 @@ struct radeon_device {
 	/* virtual memory */
 	struct radeon_vm_manager	vm_manager;
 	struct mutex			gpu_clock_mutex;
+	/* ACPI interface */
+	struct radeon_atif		atif;
+	struct radeon_atcs		atcs;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1683,15 +1763,21 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
+#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
+#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
 #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
 #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
 #define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
 #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
+#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
+#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
 #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
 #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1759,22 +1845,30 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
-void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+				       struct radeon_vm *vm, int ring);
+void radeon_vm_fence(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_fence *fence);
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
 int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 			    struct radeon_vm *vm,
 			    struct radeon_bo *bo,
 			    struct ttm_mem_reg *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 			     struct radeon_bo *bo);
-int radeon_vm_bo_add(struct radeon_device *rdev,
-		     struct radeon_vm *vm,
-		     struct radeon_bo *bo,
-		     uint64_t offset,
-		     uint32_t flags);
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+				       struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+				      struct radeon_vm *vm,
+				      struct radeon_bo *bo);
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t offset,
+			  uint32_t flags);
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
-		     struct radeon_vm *vm,
-		     struct radeon_bo *bo);
+		     struct radeon_bo_va *bo_va);
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
@@ -1832,12 +1926,14 @@ extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_displ
 extern int ni_init_microcode(struct radeon_device *rdev);
 extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
-/* radeon_acpi.c */ 
-#if defined(CONFIG_ACPI) 
-extern int radeon_acpi_init(struct radeon_device *rdev); 
-#else 
-static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } 
-#endif 
+/* radeon_acpi.c */
+#if defined(CONFIG_ACPI)
+extern int radeon_acpi_init(struct radeon_device *rdev);
+extern void radeon_acpi_fini(struct radeon_device *rdev);
+#else
+static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
+static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
+#endif
 
 #include "radeon_object.h"
 
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 215063e1a292..b0a5688c67f8 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -1,33 +1,118 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
+#include <linux/power_supply.h>
 #include <acpi/acpi_drivers.h>
 #include <acpi/acpi_bus.h>
+#include <acpi/video.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include "radeon.h"
+#include "radeon_acpi.h"
+#include "atom.h"
 
 #include <linux/vga_switcheroo.h>
 
+#define ACPI_AC_CLASS           "ac_adapter"
+
+extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
+
+struct atif_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 notification_mask;	/* supported notifications mask */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atif_system_params {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 valid_mask;		/* valid flags mask */
+	u32 flags;		/* flags */
+	u8 command_code;	/* notify command code */
+} __packed;
+
+struct atif_sbios_requests {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 pending;		/* pending sbios requests */
+	u8 panel_exp_mode;	/* panel expansion mode */
+	u8 thermal_gfx;		/* thermal state: target gfx controller */
+	u8 thermal_state;	/* thermal state: state id (0: exit state, non-0: state) */
+	u8 forced_power_gfx;	/* forced power state: target gfx controller */
+	u8 forced_power_state;	/* forced power state: state id */
+	u8 system_power_src;	/* system power source */
+	u8 backlight_level;	/* panel backlight level (0-255) */
+} __packed;
+
+#define ATIF_NOTIFY_MASK	0x3
+#define ATIF_NOTIFY_NONE	0
+#define ATIF_NOTIFY_81		1
+#define ATIF_NOTIFY_N		2
+
+struct atcs_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
 /* Call the ATIF method
+ */
+/**
+ * radeon_atif_call - call an ATIF method
  *
- * Note: currently we discard the output
+ * @handle: acpi handle
+ * @function: the ATIF function to execute
+ * @params: ATIF function params
+ *
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
  */
-static int radeon_atif_call(acpi_handle handle)
+static union acpi_object *radeon_atif_call(acpi_handle handle, int function,
+		struct acpi_buffer *params)
 {
 	acpi_status status;
 	union acpi_object atif_arg_elements[2];
 	struct acpi_object_list atif_arg;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
 	atif_arg.count = 2;
 	atif_arg.pointer = &atif_arg_elements[0];
 
 	atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
-	atif_arg_elements[0].integer.value = 0;
-	atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
-	atif_arg_elements[1].integer.value = 0;
+	atif_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atif_arg_elements[1].buffer.length = params->length;
+		atif_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atif_arg_elements[1].integer.value = 0;
+	}
 
 	status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
 
@@ -36,17 +121,434 @@ static int radeon_atif_call(acpi_handle handle)
 		DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
 				 acpi_format_exception(status));
 		kfree(buffer.pointer);
-		return 1;
+		return NULL;
 	}
 
-	kfree(buffer.pointer);
-	return 0;
+	return buffer.pointer;
+}
+
+/**
+ * radeon_atif_parse_notification - parse supported notifications
+ *
+ * @n: supported notifications struct
+ * @mask: supported notifications mask from ATIF
+ *
+ * Use the supported notifications mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask)
+{
+	n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
+	n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
+	n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
+	n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
+	n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
+	n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
+	n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+}
+
+/**
+ * radeon_atif_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATIF
+ *
+ * Use the supported functions mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask)
+{
+	f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
+	f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
+	f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
+	f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
+	f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
+	f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
+	f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
+	f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
+	f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
+	f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+}
+
+/**
+ * radeon_atif_verify_interface - verify ATIF
+ *
+ * @handle: acpi handle
+ * @atif: radeon atif struct
+ *
+ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
+ * to initialize ATIF and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_verify_interface(acpi_handle handle,
+		struct radeon_atif *atif)
+{
+	union acpi_object *info;
+	struct atif_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 12) {
+		DRM_INFO("ATIF buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
+
+	radeon_atif_parse_notification(&atif->notifications, output.notification_mask);
+	radeon_atif_parse_functions(&atif->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_atif_get_notification_params - determine notify configuration
+ *
+ * @handle: acpi handle
+ * @n: atif notification configuration struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
+ * to determine if a notifier is used and if so which one
+ * (all asics).  This is either Notify(VGA, 0x81) or Notify(VGA, n)
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_notification_params(acpi_handle handle,
+		struct radeon_atif_notification_cfg *n)
+{
+	union acpi_object *info;
+	struct atif_system_params params;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+	if (!info) {
+		err = -EIO;
+		goto out;
+	}
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 10) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&params, 0, sizeof(params));
+	size = min(sizeof(params), size);
+	memcpy(&params, info->buffer.pointer, size);
+
+	DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
+			params.valid_mask, params.flags);
+	params.flags = params.flags & params.valid_mask;
+
+	if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
+		n->enabled = false;
+		n->command_code = 0;
+	} else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
+		n->enabled = true;
+		n->command_code = 0x81;
+	} else {
+		if (size < 11) {
+			err = -EINVAL;
+			goto out;
+		}
+		n->enabled = true;
+		n->command_code = params.command_code;
+	}
+
+out:
+	DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
+			(n->enabled ? "enabled" : "disabled"),
+			n->command_code);
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_atif_get_sbios_requests - get requested sbios event
+ *
+ * @handle: acpi handle
+ * @req: atif sbios request struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
+ * to determine what requests the sbios is making to the driver
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_sbios_requests(acpi_handle handle,
+		struct atif_sbios_requests *req)
+{
+	union acpi_object *info;
+	size_t size;
+	int count = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+	if (!info)
+		return -EIO;
+
+	size = *(u16 *)info->buffer.pointer;
+	if (size < 0xd) {
+		count = -EINVAL;
+		goto out;
+	}
+	memset(req, 0, sizeof(*req));
+
+	size = min(sizeof(*req), size);
+	memcpy(req, info->buffer.pointer, size);
+	DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
+
+	count = hweight32(req->pending);
+
+out:
+	kfree(info);
+	return count;
+}
+
+/**
+ * radeon_atif_handler - handle ATIF notify requests
+ *
+ * @rdev: radeon_device pointer
+ * @event: atif sbios request struct
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+ * Returns NOTIFY code
+ */
+int radeon_atif_handler(struct radeon_device *rdev,
+		struct acpi_bus_event *event)
+{
+	struct radeon_atif *atif = &rdev->atif;
+	struct atif_sbios_requests req;
+	acpi_handle handle;
+	int count;
+
+	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+			event->device_class, event->type);
+
+	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+		return NOTIFY_DONE;
+
+	if (!atif->notification_cfg.enabled ||
+			event->type != atif->notification_cfg.command_code)
+		/* Not our event */
+		return NOTIFY_DONE;
+
+	/* Check pending SBIOS requests */
+	handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+	count = radeon_atif_get_sbios_requests(handle, &req);
+
+	if (count <= 0)
+		return NOTIFY_DONE;
+
+	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+		struct radeon_encoder *enc = atif->encoder_for_bl;
+
+		if (enc) {
+			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+					req.backlight_level);
+
+			radeon_set_backlight_level(rdev, enc, req.backlight_level);
+
+			if (rdev->is_atom_bios) {
+				struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
+			} else {
+				struct radeon_encoder_lvds *dig = enc->enc_priv;
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
+			}
+		}
+	}
+	/* TODO: check other events */
+
+	/* We've handled the event, stop the notifier chain. The ACPI interface
+	 * overloads ACPI_VIDEO_NOTIFY_PROBE; we don't want to send that to
+	 * userspace if the event was generated only to signal an SBIOS
+	 * request.
+	 */
+	return NOTIFY_BAD;
+}
+
+/* Call the ATCS method
+ */
+/**
+ * radeon_atcs_call - call an ATCS method
+ *
+ * @handle: acpi handle
+ * @function: the ATCS function to execute
+ * @params: ATCS function params
+ *
+ * Executes the requested ATCS function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atcs_call(acpi_handle handle, int function,
+					   struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atcs_arg_elements[2];
+	struct acpi_object_list atcs_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atcs_arg.count = 2;
+	atcs_arg.pointer = &atcs_arg_elements[0];
+
+	atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atcs_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atcs_arg_elements[1].buffer.length = params->length;
+		atcs_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atcs_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATCS is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
+				 acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * radeon_atcs_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATCS
+ *
+ * Use the supported functions mask from ATCS function
+ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask)
+{
+	f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
+	f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
+	f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
+	f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+}
+
+/**
+ * radeon_atcs_verify_interface - verify ATCS
+ *
+ * @handle: acpi handle
+ * @atcs: radeon atcs struct
+ *
+ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
+ * to initialize ATCS and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atcs_verify_interface(acpi_handle handle,
+					struct radeon_atcs *atcs)
+{
+	union acpi_object *info;
+	struct atcs_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 8) {
+		DRM_INFO("ATCS buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
+
+	radeon_atcs_parse_functions(&atcs->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_acpi_event - handle notify events
+ *
+ * @nb: notifier block
+ * @val: val
+ * @data: acpi event
+ *
+ * Calls relevant radeon functions in response to various
+ * acpi events.
+ * Returns NOTIFY code
+ */
+static int radeon_acpi_event(struct notifier_block *nb,
+			     unsigned long val,
+			     void *data)
+{
+	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+		if (power_supply_is_system_supplied() > 0)
+			DRM_DEBUG_DRIVER("pm: AC\n");
+		else
+			DRM_DEBUG_DRIVER("pm: DC\n");
+
+		radeon_pm_acpi_event_handler(rdev);
+	}
+
+	/* Check for pending SBIOS requests */
+	return radeon_atif_handler(rdev, entry);
 }
 
 /* Call all ACPI methods here */
+/**
+ * radeon_acpi_init - init driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the acpi
+ * notifier chain (all asics).
+ * Returns 0 on success, error on failure.
+ */
 int radeon_acpi_init(struct radeon_device *rdev)
 {
 	acpi_handle handle;
+	struct radeon_atif *atif = &rdev->atif;
+	struct radeon_atcs *atcs = &rdev->atcs;
 	int ret;
 
 	/* Get the device handle */
@@ -56,11 +558,90 @@ int radeon_acpi_init(struct radeon_device *rdev)
 	if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
 		return 0;
 
+	/* Call the ATCS method */
+	ret = radeon_atcs_verify_interface(handle, atcs);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+	}
+
 	/* Call the ATIF method */
-	ret = radeon_atif_call(handle);
-	if (ret)
-		return ret;
+	ret = radeon_atif_verify_interface(handle, atif);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+		goto out;
+	}
+
+	if (atif->notifications.brightness_change) {
+		struct drm_encoder *tmp;
+		struct radeon_encoder *target = NULL;
+
+		/* Find the encoder controlling the brightness */
+		list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
+				head) {
+			struct radeon_encoder *enc = to_radeon_encoder(tmp);
+
+			if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+			    enc->enc_priv) {
+				if (rdev->is_atom_bios) {
+					struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						target = enc;
+						break;
+					}
+				} else {
+					struct radeon_encoder_lvds *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						target = enc;
+						break;
+					}
+				}
+			}
+		}
+
+		atif->encoder_for_bl = target;
+		if (!target) {
+			/* Brightness change notification is enabled, but we
+			 * didn't find a backlight controller, this should
+			 * didn't find a backlight controller; this should
+			 */
+			DRM_ERROR("Cannot find a backlight controller\n");
+		}
+	}
 
-	return 0;
+	if (atif->functions.sbios_requests && !atif->functions.system_params) {
+		/* XXX check this workaround: if the sbios request function is
+		 * present, we have to see how it's configured in the system
+		 * params
+		 */
+		atif->functions.system_params = true;
+	}
+
+	if (atif->functions.system_params) {
+		ret = radeon_atif_get_notification_params(handle,
+				&atif->notification_cfg);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+					ret);
+			/* Disable notification */
+			atif->notification_cfg.enabled = false;
+		}
+	}
+
+out:
+	rdev->acpi_nb.notifier_call = radeon_acpi_event;
+	register_acpi_notifier(&rdev->acpi_nb);
+
+	return ret;
 }
 
+/**
+ * radeon_acpi_fini - tear down driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void radeon_acpi_fini(struct radeon_device *rdev)
+{
+	unregister_acpi_notifier(&rdev->acpi_nb);
+}
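Neither hunk shows where these entry points are called from; presumably radeon_acpi_init() runs once the encoders (and any backlight device) have been registered, and radeon_acpi_fini() runs on teardown. A rough sketch of the expected wiring, with the call sites assumed rather than taken from this patch:

	/* late in modeset init, after encoder/backlight setup (assumed location) */
	ret = radeon_acpi_init(rdev);
	if (ret)
		DRM_DEBUG_DRIVER("ACPI init failed, continuing without it: %d\n", ret);

	/* mirrored on teardown (assumed location) */
	radeon_acpi_fini(rdev);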
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
new file mode 100644
index 000000000000..be4af76f213d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_ACPI_H
+#define RADEON_ACPI_H
+
+struct radeon_device;
+struct acpi_bus_event;
+
+int radeon_atif_handler(struct radeon_device *rdev,
+		struct acpi_bus_event *event);
+
+/* AMD hw uses four ACPI control methods:
+ * 1. ATIF
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATIF provides an entry point for the gfx driver to interact with the sbios.
+ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
+ * notification. Which notification is used is indicated by the ATIF Control
+ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
+ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
+ * to identify pending System BIOS requests and associated parameters. For
+ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
+ * will perform display device detection and invoke ATIF Control Method
+ * SELECT_ACTIVE_DISPLAYS.
+ *
+ * 2. ATPX
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATPX methods are used on PowerXpress systems to handle mux switching and
+ * discrete GPU power control.
+ *
+ * 3. ATRM
+ * ARG0: (ACPI_INTEGER) offset of vbios rom data
+ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
+ * OUTPUT: (ACPI_BUFFER) output buffer
+ * ATRM provides an interface to access the discrete GPU vbios image on
+ * PowerXpress systems with multiple GPUs.
+ *
+ * 4. ATCS
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATCS provides an interface to AMD chipset-specific functionality.
+ *
+ */
+/* ATIF */
+#define ATIF_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported notifications mask
+ * DWORD - supported functions bit vector
+ */
+/* Notifications mask */
+#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED               (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED        (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED         (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED    (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED   (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED          (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED                (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED      (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED                   (1 << 8)
+/* supported functions vector */
+#       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED               (1 << 0)
+#       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED            (1 << 1)
+#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED              (1 << 2)
+#       define ATIF_GET_LID_STATE_SUPPORTED                       (1 << 3)
+#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED           (1 << 4)
+#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED             (1 << 5)
+#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED  (1 << 6)
+#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED    (1 << 7)
+#       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED     (1 << 12)
+#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED           (1 << 14)
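The output layout above is what the packed atif_verify_interface struct decoded in radeon_acpi.c corresponds to, field for field:

	struct atif_verify_interface {
		u16 size;		/* structure size in bytes (includes size field) */
		u16 version;		/* version */
		u32 notification_mask;	/* supported notifications mask */
		u32 function_bits;	/* supported functions bit vector */
	} __packed;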
+#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS                        0x1
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ *
+ * OR
+ *
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ * BYTE  - notify command code
+ *
+ * flags
+ * bits 1:0:
+ * 0 - Notify(VGA, 0x81) is not used for notification
+ * 1 - Notify(VGA, 0x81) is used for notification
+ * 2 - Notify(VGA, n) is used for notification where
+ * n (0xd0-0xd9) is specified in notify command code.
+ * bit 2:
+ * 1 - lid changes not reported through int10
+ */
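This is the encoding that radeon_atif_get_notification_params() decodes via the ATIF_NOTIFY_* constants in radeon_acpi.c; condensed to a sketch:

	switch (params.flags & ATIF_NOTIFY_MASK) {
	case ATIF_NOTIFY_NONE:	/* 0: no notification used */
		n->enabled = false;
		break;
	case ATIF_NOTIFY_81:	/* 1: Notify(VGA, 0x81) */
		n->enabled = true;
		n->command_code = 0x81;
		break;
	case ATIF_NOTIFY_N:	/* 2: Notify(VGA, n), n read from the notify command code byte */
		n->enabled = true;
		n->command_code = params.command_code;	/* 0xd0-0xd9 */
		break;
	}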
+#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS                     0x2
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - pending sbios requests
+ * BYTE  - panel expansion mode
+ * BYTE  - thermal state: target gfx controller
+ * BYTE  - thermal state: state id (0: exit state, non-0: state)
+ * BYTE  - forced power state: target gfx controller
+ * BYTE  - forced power state: state id
+ * BYTE  - system power source
+ * BYTE  - panel backlight level (0-255)
+ */
+/* pending sbios requests */
+#       define ATIF_DISPLAY_SWITCH_REQUEST                         (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST                  (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST                   (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST              (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST             (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST                    (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST                          (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST                (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT                             (1 << 8)
+/* panel expansion mode */
+#       define ATIF_PANEL_EXPANSION_DISABLE                        0
+#       define ATIF_PANEL_EXPANSION_FULL                           1
+#       define ATIF_PANEL_EXPANSION_ASPECT                         2
+/* target gfx controller */
+#       define ATIF_TARGET_GFX_SINGLE                              0
+#       define ATIF_TARGET_GFX_PX_IGPU                             1
+#       define ATIF_TARGET_GFX_PX_DGPU                             2
+/* system power source */
+#       define ATIF_POWER_SOURCE_AC                                1
+#       define ATIF_POWER_SOURCE_DC                                2
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_1                   3
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_2                   4
+#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS                       0x3
+/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ * WORD  - connected displays
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ */
+#       define ATIF_LCD1                                           (1 << 0)
+#       define ATIF_CRT1                                           (1 << 1)
+#       define ATIF_TV                                             (1 << 2)
+#       define ATIF_DFP1                                           (1 << 3)
+#       define ATIF_CRT2                                           (1 << 4)
+#       define ATIF_LCD2                                           (1 << 5)
+#       define ATIF_DFP2                                           (1 << 7)
+#       define ATIF_CV                                             (1 << 8)
+#       define ATIF_DFP3                                           (1 << 9)
+#       define ATIF_DFP4                                           (1 << 10)
+#       define ATIF_DFP5                                           (1 << 11)
+#       define ATIF_DFP6                                           (1 << 12)
+#define ATIF_FUNCTION_GET_LID_STATE                                0x4
+/* ARG0: ATIF_FUNCTION_GET_LID_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - lid state (0: open, 1: closed)
+ *
+ * GET_LID_STATE only works at boot and resume; for general lid
+ * status, use the kernel-provided status
+ */
+#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS                    0x5
+/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ */
+#       define ATIF_TV_STD_NTSC                                    0
+#       define ATIF_TV_STD_PAL                                     1
+#       define ATIF_TV_STD_PALM                                    2
+#       define ATIF_TV_STD_PAL60                                   3
+#       define ATIF_TV_STD_NTSCJ                                   4
+#       define ATIF_TV_STD_PALCN                                   5
+#       define ATIF_TV_STD_PALN                                    6
+#       define ATIF_TV_STD_SCART_RGB                               9
+#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS                      0x6
+/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS           0x7
+/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ */
+#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS             0x8
+/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION              0xD
+/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - gfx controller id
+ * BYTE  - current temperature (degrees Celsius)
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES                    0xF
+/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of gfx devices
+ * WORD  - device structure size in bytes (excludes device size field)
+ * DWORD - flags         \
+ * WORD  - bus number     } repeated structure
+ * WORD  - device number /
+ */
+/* flags */
+#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE                   (1 << 0)
+#       define ATIF_XGP_PORT                                       (1 << 1)
+#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE                    (1 << 2)
+#       define ATIF_XGP_PORT_IN_DOCK                               (1 << 3)
+
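The "repeated structure" notation above (also used by the ATPX connector and detection-port tables below) means an array of fixed-size records follows the two header WORDs. A hypothetical packed layout for one GET_GRAPHICS_DEVICE_TYPES entry, not defined anywhere in this patch:

	struct atif_gfx_device_entry {	/* hypothetical name */
		u32 flags;		/* ATIF_PX_REMOVABLE_GRAPHICS_DEVICE, ... */
		u16 bus_number;
		u16 device_number;
	} __packed;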
+/* ATPX */
+#define ATPX_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATPX_GET_PX_PARAMETERS_SUPPORTED                    (1 << 0)
+#       define ATPX_POWER_CONTROL_SUPPORTED                        (1 << 1)
+#       define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED                  (1 << 2)
+#       define ATPX_I2C_MUX_CONTROL_SUPPORTED                      (1 << 3)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED   (1 << 5)
+#       define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED       (1 << 7)
+#       define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED          (1 << 8)
+#define ATPX_FUNCTION_GET_PX_PARAMETERS                            0x1
+/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ */
+/* flags */
+#       define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 0)
+#       define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 1)
+#       define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 2)
+#       define ATPX_CRT1_RGB_SIGNAL_MUXED                          (1 << 3)
+#       define ATPX_TV_SIGNAL_MUXED                                (1 << 4)
+#       define ATPX_DFP_SIGNAL_MUXED                               (1 << 5)
+#       define ATPX_SEPARATE_MUX_FOR_I2C                           (1 << 6)
+#       define ATPX_DYNAMIC_PX_SUPPORTED                           (1 << 7)
+#       define ATPX_ACF_NOT_SUPPORTED                              (1 << 8)
+#       define ATPX_FIXED_NOT_SUPPORTED                            (1 << 9)
+#       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED               (1 << 10)
+#       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS                    (1 << 11)
+#define ATPX_FUNCTION_POWER_CONTROL                                0x2
+/* ARG0: ATPX_FUNCTION_POWER_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - dGPU power state (0: power off, 1: power on)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL                          0x3
+/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - display mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#       define ATPX_INTEGRATED_GPU                                 0
+#       define ATPX_DISCRETE_GPU                                   1
+#define ATPX_FUNCTION_I2C_MUX_CONTROL                              0x4
+/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION    0x5
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION      0x6
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING               0x8
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of display connectors
+ * WORD  - connector structure size in bytes (excludes connector size field)
+ * BYTE  - flags                                                     \
+ * BYTE  - ATIF display vector bit position                           } repeated
+ * BYTE  - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
+ * WORD  - connector ACPI id                                         /
+ */
+/* flags */
+#       define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE  (1 << 0)
+#       define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 1)
+#       define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 2)
+#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS                  0x9
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of HPD/DDC ports
+ * WORD  - port structure size in bytes (excludes port size field)
+ * BYTE  - ATIF display vector bit position \
+ * BYTE  - hpd id                            } repeated structure
+ * BYTE  - ddc id                           /
+ *
+ * available on A+A systems only
+ */
+/* hpd id */
+#       define ATPX_HPD_NONE                                       0
+#       define ATPX_HPD1                                           1
+#       define ATPX_HPD2                                           2
+#       define ATPX_HPD3                                           3
+#       define ATPX_HPD4                                           4
+#       define ATPX_HPD5                                           5
+#       define ATPX_HPD6                                           6
+/* ddc id */
+#       define ATPX_DDC_NONE                                       0
+#       define ATPX_DDC1                                           1
+#       define ATPX_DDC2                                           2
+#       define ATPX_DDC3                                           3
+#       define ATPX_DDC4                                           4
+#       define ATPX_DDC5                                           5
+#       define ATPX_DDC6                                           6
+#       define ATPX_DDC7                                           7
+#       define ATPX_DDC8                                           8
+
+/* ATCS */
+#define ATCS_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATCS_GET_EXTERNAL_STATE_SUPPORTED                   (1 << 0)
+#       define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED             (1 << 1)
+#       define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED       (1 << 2)
+#       define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED                   (1 << 3)
+#define ATCS_FUNCTION_GET_EXTERNAL_STATE                           0x1
+/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags (0: undocked, 1: docked)
+ */
+/* flags */
+#       define ATCS_DOCKED                                         (1 << 0)
+#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST                     0x2
+/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * WORD  - valid flags mask
+ * WORD  - flags
+ * BYTE  - request type
+ * BYTE  - performance request
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - return value
+ */
+/* flags */
+#       define ATCS_ADVERTISE_CAPS                                 (1 << 0)
+#       define ATCS_WAIT_FOR_COMPLETION                            (1 << 1)
+/* request type */
+#       define ATCS_PCIE_LINK_SPEED                                1
+/* performance request */
+#       define ATCS_REMOVE                                         0
+#       define ATCS_FORCE_LOW_POWER                                1
+#       define ATCS_PERF_LEVEL_1                                   2 /* PCIE Gen 1 */
+#       define ATCS_PERF_LEVEL_2                                   3 /* PCIE Gen 2 */
+#       define ATCS_PERF_LEVEL_3                                   4 /* PCIE Gen 3 */
+/* return value */
+#       define ATCS_REQUEST_REFUSED                                1
+#       define ATCS_REQUEST_COMPLETE                               2
+#       define ATCS_REQUEST_IN_PROGRESS                            3
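Nothing in this patch issues a PCIE_PERFORMANCE_REQUEST yet; a future caller would map the ARG1 layout above onto a packed input struct along these lines (hypothetical, the name is not from this patch):

	struct atcs_pcie_perf_req_input {
		u16 size;		/* structure size in bytes (includes size field) */
		u16 client_id;		/* bits 2-0: func num, 7-3: dev num, 15-8: bus num */
		u16 valid_flags_mask;
		u16 flags;		/* ATCS_ADVERTISE_CAPS, ATCS_WAIT_FOR_COMPLETION */
		u8 req_type;		/* ATCS_PCIE_LINK_SPEED */
		u8 perf_req;		/* ATCS_PERF_LEVEL_* */
	} __packed;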
+#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION               0x3
+/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
+#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH                           0x4
+/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE  - number of active lanes
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - number of active lanes
+ */
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 973417c4b014..654520b95ab7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -198,6 +198,8 @@ static struct radeon_asic r100_asic = {
 		.bandwidth_update = &r100_bandwidth_update,
 		.get_vblank_counter = &r100_get_vblank_counter,
 		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -272,6 +274,8 @@ static struct radeon_asic r200_asic = {
 		.bandwidth_update = &r100_bandwidth_update,
 		.get_vblank_counter = &r100_get_vblank_counter,
 		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -346,6 +350,8 @@ static struct radeon_asic r300_asic = {
 		.bandwidth_update = &r100_bandwidth_update,
 		.get_vblank_counter = &r100_get_vblank_counter,
 		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -420,6 +426,8 @@ static struct radeon_asic r300_asic_pcie = {
 		.bandwidth_update = &r100_bandwidth_update,
 		.get_vblank_counter = &r100_get_vblank_counter,
 		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -494,6 +502,8 @@ static struct radeon_asic r420_asic = {
 		.bandwidth_update = &r100_bandwidth_update,
 		.get_vblank_counter = &r100_get_vblank_counter,
 		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -568,6 +578,8 @@ static struct radeon_asic rs400_asic = {
 		.bandwidth_update = &r100_bandwidth_update,
 		.get_vblank_counter = &r100_get_vblank_counter,
 		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -642,6 +654,8 @@ static struct radeon_asic rs600_asic = {
 		.bandwidth_update = &rs600_bandwidth_update,
 		.get_vblank_counter = &rs600_get_vblank_counter,
 		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -716,6 +730,8 @@ static struct radeon_asic rs690_asic = {
 		.get_vblank_counter = &rs600_get_vblank_counter,
 		.bandwidth_update = &rs690_bandwidth_update,
 		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -790,6 +806,8 @@ static struct radeon_asic rv515_asic = {
 		.get_vblank_counter = &rs600_get_vblank_counter,
 		.bandwidth_update = &rv515_bandwidth_update,
 		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -864,6 +882,8 @@ static struct radeon_asic r520_asic = {
 		.bandwidth_update = &rv515_bandwidth_update,
 		.get_vblank_counter = &rs600_get_vblank_counter,
 		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r100_copy_blit,
@@ -937,6 +957,8 @@ static struct radeon_asic r600_asic = {
 		.bandwidth_update = &rv515_bandwidth_update,
 		.get_vblank_counter = &rs600_get_vblank_counter,
 		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r600_copy_blit,
@@ -1010,6 +1032,8 @@ static struct radeon_asic rs780_asic = {
 		.bandwidth_update = &rs690_bandwidth_update,
 		.get_vblank_counter = &rs600_get_vblank_counter,
 		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r600_copy_blit,
@@ -1083,6 +1107,8 @@ static struct radeon_asic rv770_asic = {
 		.bandwidth_update = &rv515_bandwidth_update,
 		.get_vblank_counter = &rs600_get_vblank_counter,
 		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r600_copy_blit,
@@ -1156,6 +1182,8 @@ static struct radeon_asic evergreen_asic = {
 		.bandwidth_update = &evergreen_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r600_copy_blit,
@@ -1229,6 +1257,8 @@ static struct radeon_asic sumo_asic = {
 		.bandwidth_update = &evergreen_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r600_copy_blit,
@@ -1302,6 +1332,8 @@ static struct radeon_asic btc_asic = {
 		.bandwidth_update = &evergreen_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r600_copy_blit,
@@ -1325,7 +1357,7 @@ static struct radeon_asic btc_asic = {
 		.misc = &evergreen_pm_misc,
 		.prepare = &evergreen_pm_prepare,
 		.finish = &evergreen_pm_finish,
-		.init_profile = &r600_pm_init_profile,
+		.init_profile = &btc_pm_init_profile,
 		.get_dynpm_state = &r600_pm_get_dynpm_state,
 		.get_engine_clock = &radeon_atom_get_engine_clock,
 		.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -1342,16 +1374,6 @@ static struct radeon_asic btc_asic = {
 	},
 };
 
-static const struct radeon_vm_funcs cayman_vm_funcs = {
-	.init = &cayman_vm_init,
-	.fini = &cayman_vm_fini,
-	.bind = &cayman_vm_bind,
-	.unbind = &cayman_vm_unbind,
-	.tlb_flush = &cayman_vm_tlb_flush,
-	.page_flags = &cayman_vm_page_flags,
-	.set_page = &cayman_vm_set_page,
-};
-
 static struct radeon_asic cayman_asic = {
 	.init = &cayman_init,
 	.fini = &cayman_fini,
@@ -1366,6 +1388,12 @@ static struct radeon_asic cayman_asic = {
 		.tlb_flush = &cayman_pcie_gart_tlb_flush,
 		.set_page = &rs600_gart_set_page,
 	},
+	.vm = {
+		.init = &cayman_vm_init,
+		.fini = &cayman_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.set_page = &cayman_vm_set_page,
+	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1376,6 +1404,7 @@ static struct radeon_asic cayman_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
 		},
 		[CAYMAN_RING_TYPE_CP1_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1386,6 +1415,7 @@ static struct radeon_asic cayman_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
 		},
 		[CAYMAN_RING_TYPE_CP2_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1396,6 +1426,7 @@ static struct radeon_asic cayman_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
 		}
 	},
 	.irq = {
@@ -1406,6 +1437,8 @@ static struct radeon_asic cayman_asic = {
 		.bandwidth_update = &evergreen_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r600_copy_blit,
@@ -1429,7 +1462,7 @@ static struct radeon_asic cayman_asic = {
 		.misc = &evergreen_pm_misc,
 		.prepare = &evergreen_pm_prepare,
 		.finish = &evergreen_pm_finish,
-		.init_profile = &r600_pm_init_profile,
+		.init_profile = &btc_pm_init_profile,
 		.get_dynpm_state = &r600_pm_get_dynpm_state,
 		.get_engine_clock = &radeon_atom_get_engine_clock,
 		.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -1460,6 +1493,12 @@ static struct radeon_asic trinity_asic = {
 		.tlb_flush = &cayman_pcie_gart_tlb_flush,
 		.set_page = &rs600_gart_set_page,
 	},
+	.vm = {
+		.init = &cayman_vm_init,
+		.fini = &cayman_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.set_page = &cayman_vm_set_page,
+	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1470,6 +1509,7 @@ static struct radeon_asic trinity_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
 		},
 		[CAYMAN_RING_TYPE_CP1_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1480,6 +1520,7 @@ static struct radeon_asic trinity_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
 		},
 		[CAYMAN_RING_TYPE_CP2_INDEX] = {
 			.ib_execute = &cayman_ring_ib_execute,
@@ -1490,6 +1531,7 @@ static struct radeon_asic trinity_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &evergreen_gpu_is_lockup,
+			.vm_flush = &cayman_vm_flush,
 		}
 	},
 	.irq = {
@@ -1500,6 +1542,8 @@ static struct radeon_asic trinity_asic = {
 		.bandwidth_update = &dce6_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = &r600_copy_blit,
@@ -1540,16 +1584,6 @@ static struct radeon_asic trinity_asic = {
 	},
 };
 
-static const struct radeon_vm_funcs si_vm_funcs = {
-	.init = &si_vm_init,
-	.fini = &si_vm_fini,
-	.bind = &si_vm_bind,
-	.unbind = &si_vm_unbind,
-	.tlb_flush = &si_vm_tlb_flush,
-	.page_flags = &cayman_vm_page_flags,
-	.set_page = &cayman_vm_set_page,
-};
-
 static struct radeon_asic si_asic = {
 	.init = &si_init,
 	.fini = &si_fini,
@@ -1564,6 +1598,12 @@ static struct radeon_asic si_asic = {
 		.tlb_flush = &si_pcie_gart_tlb_flush,
 		.set_page = &rs600_gart_set_page,
 	},
+	.vm = {
+		.init = &si_vm_init,
+		.fini = &si_vm_fini,
+		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.set_page = &si_vm_set_page,
+	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = {
 			.ib_execute = &si_ring_ib_execute,
@@ -1574,6 +1614,7 @@ static struct radeon_asic si_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &si_gpu_is_lockup,
+			.vm_flush = &si_vm_flush,
 		},
 		[CAYMAN_RING_TYPE_CP1_INDEX] = {
 			.ib_execute = &si_ring_ib_execute,
@@ -1584,6 +1625,7 @@ static struct radeon_asic si_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &si_gpu_is_lockup,
+			.vm_flush = &si_vm_flush,
 		},
 		[CAYMAN_RING_TYPE_CP2_INDEX] = {
 			.ib_execute = &si_ring_ib_execute,
@@ -1594,6 +1636,7 @@ static struct radeon_asic si_asic = {
 			.ring_test = &r600_ring_test,
 			.ib_test = &r600_ib_test,
 			.is_lockup = &si_gpu_is_lockup,
+			.vm_flush = &si_vm_flush,
 		}
 	},
 	.irq = {
@@ -1604,6 +1647,8 @@ static struct radeon_asic si_asic = {
 		.bandwidth_update = &dce6_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 	},
 	.copy = {
 		.blit = NULL,
@@ -1697,6 +1742,7 @@ int radeon_asic_init(struct radeon_device *rdev)
 			rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
 			rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
 			rdev->asic->pm.set_memory_clock = NULL;
+			rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
 		}
 		break;
 	case CHIP_RS400:
@@ -1769,13 +1815,11 @@ int radeon_asic_init(struct radeon_device *rdev)
 		rdev->asic = &cayman_asic;
 		/* set num crtcs */
 		rdev->num_crtc = 6;
-		rdev->vm_manager.funcs = &cayman_vm_funcs;
 		break;
 	case CHIP_ARUBA:
 		rdev->asic = &trinity_asic;
 		/* set num crtcs */
 		rdev->num_crtc = 4;
-		rdev->vm_manager.funcs = &cayman_vm_funcs;
 		break;
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
@@ -1783,7 +1827,6 @@ int radeon_asic_init(struct radeon_device *rdev)
 		rdev->asic = &si_asic;
 		/* set num crtcs */
 		rdev->num_crtc = 6;
-		rdev->vm_manager.funcs = &si_vm_funcs;
 		break;
 	default:
 		/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 18c38d14c8cd..5e3a0e5c6be1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -42,6 +42,12 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
 void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
 
+void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
+void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
+
+
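These prototypes back the .set_backlight_level/.get_backlight_level hooks added to the per-asic display tables in radeon_asic.c; generic code presumably reaches them through the usual asic dispatch macros, roughly as below (assumed, the macro definitions live in radeon.h and are not part of this hunk):

	#define radeon_set_backlight_level(rdev, e, l) \
		(rdev)->asic->display.set_backlight_level((e), (l))
	#define radeon_get_backlight_level(rdev, e) \
		(rdev)->asic->display.get_backlight_level((e))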
 /*
  * r100,rv100,rs100,rv200,rs200
  */
@@ -389,6 +395,7 @@ void r700_cp_fini(struct radeon_device *rdev);
 struct evergreen_mc_save {
 	u32 vga_render_control;
 	u32 vga_hdp_control;
+	bool crtc_enabled[RADEON_MAX_CRTCS];
 };
 
 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -413,6 +420,7 @@ extern void evergreen_pm_misc(struct radeon_device *rdev);
 extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
 extern void sumo_pm_init_profile(struct radeon_device *rdev);
+extern void btc_pm_init_profile(struct radeon_device *rdev);
 extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
 extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
 extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -435,14 +443,11 @@ int cayman_asic_reset(struct radeon_device *rdev);
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
-int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
-void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
-void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
-			      struct radeon_vm *vm,
-			      uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
-			unsigned pfn, uint64_t addr, uint32_t flags);
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 
 /* DCE6 - SI */
@@ -465,9 +470,10 @@ int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
-void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
-void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+		    uint64_t addr, unsigned count,
+		    uint32_t incr, uint32_t flags);
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 uint64_t si_get_gpu_clock(struct radeon_device *rdev);
 
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index c4b5d0542ee2..f22eb5713528 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1254,6 +1254,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 		if (rdev->clock.max_pixel_clock == 0)
 			rdev->clock.max_pixel_clock = 40000;
 
+		/* not technically a clock, but... */
+		rdev->mode_info.firmware_flags =
+			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
 		return true;
 	}
 
@@ -2005,7 +2009,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
 	/* add the i2c bus for thermal/fan chip */
-	if (power_info->info.ucOverdriveThermalController > 0) {
+	if ((power_info->info.ucOverdriveThermalController > 0) &&
+	    (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
 		DRM_INFO("Possible %s thermal controller at 0x%02x\n",
 			 thermal_controller_names[power_info->info.ucOverdriveThermalController],
 			 power_info->info.ucOverdriveControllerAddress >> 1);
@@ -2209,7 +2214,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 			   (controller->ucType ==
 			    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
 			DRM_INFO("Special thermal controller config\n");
-		} else {
+		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
 			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
 				 pp_lib_thermal_controller_names[controller->ucType],
 				 controller->ucI2cAddress >> 1,
@@ -2224,6 +2229,12 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 				strlcpy(info.type, name, sizeof(info.type));
 				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
 			}
+		} else {
+			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+				 controller->ucType,
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 		}
 	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 2a2cf0b88a28..582e99449c12 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -12,30 +12,62 @@
 #include <acpi/acpi_bus.h>
 #include <linux/pci.h>
 
-#define ATPX_VERSION 0
-#define ATPX_GPU_PWR 2
-#define ATPX_MUX_SELECT 3
-#define ATPX_I2C_MUX_SELECT 4
-#define ATPX_SWITCH_START 5
-#define ATPX_SWITCH_END 6
-
-#define ATPX_INTEGRATED 0
-#define ATPX_DISCRETE 1
+#include "radeon_acpi.h"
+
+struct radeon_atpx_functions {
+	bool px_params;
+	bool power_cntl;
+	bool disp_mux_cntl;
+	bool i2c_mux_cntl;
+	bool switch_start;
+	bool switch_end;
+	bool disp_connectors_mapping;
+	bool disp_detetion_ports;
+};
 
-#define ATPX_MUX_IGD 0
-#define ATPX_MUX_DISCRETE 1
+struct radeon_atpx {
+	acpi_handle handle;
+	struct radeon_atpx_functions functions;
+};
 
 static struct radeon_atpx_priv {
 	bool atpx_detected;
 	/* handle for device - and atpx */
 	acpi_handle dhandle;
-	acpi_handle atpx_handle;
+	struct radeon_atpx atpx;
 } radeon_atpx_priv;
 
-static int radeon_atpx_get_version(acpi_handle handle)
+struct atpx_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atpx_power_control {
+	u16 size;
+	u8 dgpu_state;
+} __packed;
+
+struct atpx_mux {
+	u16 size;
+	u16 mux;
+} __packed;
+
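atpx_power_control and atpx_mux are the ARG1 payloads for the ATPX power-control and mux functions declared in radeon_acpi.h; the callers that build them are presumably added later in the series. A sketch of the pattern (the function name is illustrative only):

	static int example_atpx_set_dgpu_power(struct radeon_atpx *atpx, bool on)
	{
		struct atpx_power_control input = {
			.size = sizeof(input),
			.dgpu_state = on ? 1 : 0,	/* 0: power off, 1: power on */
		};
		struct acpi_buffer params = { sizeof(input), &input };
		union acpi_object *info;

		info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_POWER_CONTROL, &params);
		if (!info)
			return -EIO;
		kfree(info);
		return 0;
	}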
+/**
+ * radeon_atpx_call - call an ATPX method
+ *
+ * @handle: acpi handle
+ * @function: the ATPX function to execute
+ * @params: ATPX function params
+ *
+ * Executes the requested ATPX function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
+					   struct acpi_buffer *params)
 {
 	acpi_status status;
-	union acpi_object atpx_arg_elements[2], *obj;
+	union acpi_object atpx_arg_elements[2];
 	struct acpi_object_list atpx_arg;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
@@ -43,99 +75,292 @@ static int radeon_atpx_get_version(acpi_handle handle)
 	atpx_arg.pointer = &atpx_arg_elements[0];
 
 	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
-	atpx_arg_elements[0].integer.value = ATPX_VERSION;
+	atpx_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atpx_arg_elements[1].buffer.length = params->length;
+		atpx_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atpx_arg_elements[1].integer.value = 0;
+	}
 
-	atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
-	atpx_arg_elements[1].integer.value = ATPX_VERSION;
+	status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
 
-	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
-	if (ACPI_FAILURE(status)) {
-		printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
-		return -ENOSYS;
+	/* Fail only if calling the method fails and ATPX is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		printk("failed to evaluate ATPX got %s\n",
+		       acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
 	}
-	obj = (union acpi_object *)buffer.pointer;
-	if (obj && (obj->type == ACPI_TYPE_BUFFER))
-		printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
-	kfree(buffer.pointer);
-	return 0;
+
+	return buffer.pointer;
 }
 
-static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
+/**
+ * radeon_atpx_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATPX
+ *
+ * Use the supported functions mask from ATPX function
+ * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
 {
-	acpi_status status;
-	union acpi_object atpx_arg_elements[2];
-	struct acpi_object_list atpx_arg;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	uint8_t buf[4] = {0};
-
-	if (!handle)
-		return -EINVAL;
-
-	atpx_arg.count = 2;
-	atpx_arg.pointer = &atpx_arg_elements[0];
+	f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
+	f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
+	f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
+	f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
+	f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
+	f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
+	f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
+	f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+}
 
-	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
-	atpx_arg_elements[0].integer.value = cmd_id;
+/**
+ * radeon_atpx_verify_interface - verify ATPX
+ *
+ * @handle: acpi handle
+ * @atpx: radeon atpx struct
+ *
+ * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
+ * to initialize ATPX and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
+{
+	union acpi_object *info;
+	struct atpx_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 8) {
+		printk("ATPX buffer is too small: %lu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
 
-	buf[2] = value & 0xff;
-	buf[3] = (value >> 8) & 0xff;
+	memcpy(&output, info->buffer.pointer, size);
 
-	atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
-	atpx_arg_elements[1].buffer.length = 4;
-	atpx_arg_elements[1].buffer.pointer = buf;
+	/* TODO: check version? */
+	printk("ATPX version %u\n", output.version);
 
-	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
-	if (ACPI_FAILURE(status)) {
-		printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
-		return -ENOSYS;
-	}
-	kfree(buffer.pointer);
+	radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
 
-	return 0;
+out:
+	kfree(info);
+	return err;
 }
 
-static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
+/**
+ * radeon_atpx_set_discrete_state - power up/down discrete GPU
+ *
+ * @atpx: atpx info struct
+ * @state: discrete GPU state (0 = power down, 1 = power up)
+ *
+ * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
+ * power down/up the discrete GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
 {
-	return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_power_control input;
+
+	if (atpx->functions.power_cntl) {
+		input.size = 3;
+		input.dgpu_state = state;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_POWER_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
 }
 
-static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
+/**
+ * radeon_atpx_switch_disp_mux - switch display mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
+ * switch the display mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id)
 {
-	return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.disp_mux_cntl) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
 }
 
-static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id)
+/**
+ * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
+ * switch the i2c/hpd mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id)
 {
-	return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id);
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.i2c_mux_cntl) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_I2C_MUX_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
 }
 
-static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id)
+/**
+ * radeon_atpx_switch_start - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has begun (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id)
 {
-	return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id);
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.switch_start) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
 }
 
-static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id)
+/**
+ * radeon_atpx_switch_end - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has ended (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id)
 {
-	return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id);
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.switch_end) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
 }
 
+/**
+ * radeon_atpx_switchto - switch to the requested GPU
+ *
+ * @id: GPU to switch to
+ *
+ * Execute the necessary ATPX functions to switch between the discrete GPU and
+ * integrated GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
 static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
 {
-	int gpu_id;
+	u16 gpu_id;
 
 	if (id == VGA_SWITCHEROO_IGD)
-		gpu_id = ATPX_INTEGRATED;
+		gpu_id = ATPX_INTEGRATED_GPU;
 	else
-		gpu_id = ATPX_DISCRETE;
+		gpu_id = ATPX_DISCRETE_GPU;
 
-	radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id);
-	radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id);
-	radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id);
-	radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id);
+	radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id);
 
 	return 0;
 }
 
+/**
+ * radeon_atpx_power_state - power up/down the requested GPU
+ *
+ * @id: vga_switcheroo client id
+ * @state: requested power state (0 = off, 1 = on)
+ *
+ * Execute the necessary ATPX function to power down/up the discrete GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
 static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 				   enum vga_switcheroo_state state)
 {
@@ -143,10 +368,18 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 	if (id == VGA_SWITCHEROO_IGD)
 		return 0;
 
-	radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
+	radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state);
 	return 0;
 }
 
+/**
+ * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATPX and ATRM handles (all asics).
+ * Returns true if the handles are found, false if not.
+ */
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
 	acpi_handle dhandle, atpx_handle;
@@ -161,18 +394,30 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 		return false;
 
 	radeon_atpx_priv.dhandle = dhandle;
-	radeon_atpx_priv.atpx_handle = atpx_handle;
+	radeon_atpx_priv.atpx.handle = atpx_handle;
 	return true;
 }
 
+/**
+ * radeon_atpx_init - verify the ATPX interface
+ *
+ * Verify the ATPX interface (all asics).
+ * Returns 0 on success, error on failure.
+ */
 static int radeon_atpx_init(void)
 {
 	/* set up the ATPX handle */
-
-	radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
-	return 0;
+	return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
 }
 
+/**
+ * radeon_atpx_get_client_id - get the client id
+ *
+ * @pdev: pci device
+ *
+ * Look up whether we are the integrated or discrete GPU (all asics).
+ * Returns the client id.
+ */
 static int radeon_atpx_get_client_id(struct pci_dev *pdev)
 {
 	if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
@@ -188,6 +433,12 @@ static struct vga_switcheroo_handler radeon_atpx_handler = {
 	.get_client_id = radeon_atpx_get_client_id,
 };
 
+/**
+ * radeon_atpx_detect - detect whether we have PX
+ *
+ * Check if we have a PX system (all asics).
+ * Returns true if we have a PX system, false if not.
+ */
 static bool radeon_atpx_detect(void)
 {
 	char acpi_method_name[255] = { 0 };
@@ -203,7 +454,7 @@ static bool radeon_atpx_detect(void)
 	}
 
 	if (has_atpx && vga_count == 2) {
-		acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
+		acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
 		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
 		       acpi_method_name);
 		radeon_atpx_priv.atpx_detected = true;
@@ -212,6 +463,11 @@ static bool radeon_atpx_detect(void)
 	return false;
 }
 
+/**
+ * radeon_register_atpx_handler - register with vga_switcheroo
+ *
+ * Register the PX callbacks with vga_switcheroo (all asics).
+ */
 void radeon_register_atpx_handler(void)
 {
 	bool r;
@@ -224,6 +480,11 @@ void radeon_register_atpx_handler(void)
 	vga_switcheroo_register_handler(&radeon_atpx_handler);
 }
 
+/**
+ * radeon_unregister_atpx_handler - unregister with vga_switcheroo
+ *
+ * Unregister the PX callbacks with vga_switcheroo (all asics).
+ */
 void radeon_unregister_atpx_handler(void)
 {
 	vga_switcheroo_unregister_handler();
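
The rewritten ATPX path now calls ATPX_FUNCTION_VERIFY_INTERFACE once and decodes the returned buffer: the first u16 is the blob size, which is checked against a minimum and clamped to the local struct before copying. Below is a self-contained sketch of that parsing step; the struct layout follows the diff, but the raw byte buffer stands in for the real ACPI output and is invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct atpx_verify_interface {
	uint16_t size;		/* structure size in bytes (includes size field) */
	uint16_t version;	/* interface version */
	uint32_t function_bits;	/* supported functions bit vector */
} __attribute__((packed));

int main(void)
{
	/* pretend ACPI returned: size=8, version=1, function_bits=0x33 */
	uint8_t raw[8] = { 0x08, 0x00, 0x01, 0x00, 0x33, 0x00, 0x00, 0x00 };
	struct atpx_verify_interface out;
	size_t size = (size_t)raw[0] | ((size_t)raw[1] << 8);

	if (size < 8) {
		fprintf(stderr, "ATPX buffer is too small: %zu\n", size);
		return 1;
	}
	if (size > sizeof(out))
		size = sizeof(out);	/* clamp to what we understand */

	memset(&out, 0, sizeof(out));
	memcpy(&out, raw, size);

	printf("ATPX version %u, functions 0x%08x\n",
	       (unsigned int)out.version, (unsigned int)out.function_bits);
	return 0;
}
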
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a3900e7bd77b..45b660b27cfc 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3319,15 +3319,6 @@ static void combios_write_ram_size(struct drm_device *dev)
 	WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
 }
 
-void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
-{
-	uint16_t dyn_clk_info =
-	    combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
-
-	if (dyn_clk_info)
-		combios_parse_pll_table(dev, dyn_clk_info);
-}
-
 void radeon_combios_asic_init(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3bc22e341719..67cfc1795ecd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,10 +40,6 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
 				       struct drm_encoder *encoder,
 				       bool connected);
 
-extern void
-radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
-			     struct drm_connector *drm_connector);
-
 void radeon_connector_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
@@ -198,7 +194,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
 	}
 }
 
-struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
+static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
 {
 	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
@@ -219,7 +215,7 @@ struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int enc
 	return NULL;
 }
 
-struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
+static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
 	struct drm_mode_object *obj;
@@ -370,7 +366,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
 	}
 }
 
-int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
 				  uint64_t val)
 {
 	struct drm_device *dev = connector->dev;
@@ -691,13 +687,13 @@ static int radeon_lvds_set_property(struct drm_connector *connector,
 }
 
 
-struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
 	.get_modes = radeon_lvds_get_modes,
 	.mode_valid = radeon_lvds_mode_valid,
 	.best_encoder = radeon_best_single_encoder,
 };
 
-struct drm_connector_funcs radeon_lvds_connector_funcs = {
+static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -809,13 +805,13 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
 	return ret;
 }
 
-struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
 	.get_modes = radeon_vga_get_modes,
 	.mode_valid = radeon_vga_mode_valid,
 	.best_encoder = radeon_best_single_encoder,
 };
 
-struct drm_connector_funcs radeon_vga_connector_funcs = {
+static const struct drm_connector_funcs radeon_vga_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -879,13 +875,13 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
 	return ret;
 }
 
-struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
 	.get_modes = radeon_tv_get_modes,
 	.mode_valid = radeon_tv_mode_valid,
 	.best_encoder = radeon_best_single_encoder,
 };
 
-struct drm_connector_funcs radeon_tv_connector_funcs = {
+static const struct drm_connector_funcs radeon_tv_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_tv_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1089,7 +1085,7 @@ out:
 }
 
 /* okay need to be smart in here about which encoder to pick */
-struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
+static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
 {
 	int enc_id = connector->encoder_ids[0];
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1179,13 +1175,13 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
 	.get_modes = radeon_dvi_get_modes,
 	.mode_valid = radeon_dvi_mode_valid,
 	.best_encoder = radeon_dvi_encoder,
 };
 
-struct drm_connector_funcs radeon_dvi_connector_funcs = {
+static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_dvi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1462,13 +1458,13 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
 	}
 }
 
-struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
 	.get_modes = radeon_dp_get_modes,
 	.mode_valid = radeon_dp_mode_valid,
 	.best_encoder = radeon_dvi_encoder,
 };
 
-struct drm_connector_funcs radeon_dp_connector_funcs = {
+static const struct drm_connector_funcs radeon_dp_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -2008,15 +2004,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
 	connector->display_info.subpixel_order = subpixel_order;
 	drm_sysfs_connector_add(connector);
-	if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
-		struct drm_encoder *drm_encoder;
-
-		list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
-			struct radeon_encoder *radeon_encoder;
-
-			radeon_encoder = to_radeon_encoder(drm_encoder);
-			if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
-				radeon_legacy_backlight_init(radeon_encoder, connector);
-		}
-	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 891fff52ab65..cb7b7c062fef 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -32,7 +32,7 @@
 void r100_cs_dump_packet(struct radeon_cs_parser *p,
 			 struct radeon_cs_packet *pkt);
 
-int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 {
 	struct drm_device *ddev = p->rdev->ddev;
 	struct radeon_cs_chunk *chunk;
@@ -115,19 +115,27 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 	return 0;
 }
 
+static void radeon_cs_sync_to(struct radeon_cs_parser *p,
+			      struct radeon_fence *fence)
+{
+	struct radeon_fence *other;
+
+	if (!fence)
+		return;
+
+	other = p->ib.sync_to[fence->ring];
+	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
 static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
 	int i;
 
 	for (i = 0; i < p->nrelocs; i++) {
-		struct radeon_fence *a, *b;
-
-		if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
+		if (!p->relocs[i].robj)
 			continue;
 
-		a = p->relocs[i].robj->tbo.sync_obj;
-		b = p->ib.sync_to[a->ring];
-		p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
+		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
 	}
 }
 
@@ -278,30 +286,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	return 0;
 }
 
-static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
-				  struct radeon_fence *fence)
-{
-	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
-	struct radeon_vm *vm = &fpriv->vm;
-	struct radeon_bo_list *lobj;
-
-	if (parser->chunk_ib_idx == -1) {
-		return;
-	}
-	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
-		return;
-	}
-
-	list_for_each_entry(lobj, &parser->validated, tv.head) {
-		struct radeon_bo_va *bo_va;
-		struct radeon_bo *rbo = lobj->bo;
-
-		bo_va = radeon_bo_va(rbo, vm);
-		radeon_fence_unref(&bo_va->fence);
-		bo_va->fence = radeon_fence_ref(fence);
-	}
-}
-
 /**
  * cs_parser_fini() - clean parser states
  * @parser:	parser structure holding parsing context.
@@ -315,8 +299,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	unsigned i;
 
 	if (!error) {
-		/* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
-		radeon_bo_vm_fence_va(parser, parser->ib.fence);
 		ttm_eu_fence_buffer_objects(&parser->validated,
 					    parser->ib.fence);
 	} else {
@@ -363,7 +345,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 	 * uncached).
 	 */
 	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
-			   ib_chunk->length_dw * 4);
+			   NULL, ib_chunk->length_dw * 4);
 	if (r) {
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
@@ -380,7 +362,6 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 		return r;
 	}
 	radeon_cs_sync_rings(parser);
-	parser->ib.vm_id = 0;
 	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
@@ -391,10 +372,15 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
 				   struct radeon_vm *vm)
 {
+	struct radeon_device *rdev = parser->rdev;
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
 	int r;
 
+	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+	if (r) {
+		return r;
+	}
 	list_for_each_entry(lobj, &parser->validated, tv.head) {
 		bo = lobj->bo;
 		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
@@ -426,7 +412,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 			return -EINVAL;
 		}
 		r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
-				   ib_chunk->length_dw * 4);
+				   vm, ib_chunk->length_dw * 4);
 		if (r) {
 			DRM_ERROR("Failed to get const ib !\n");
 			return r;
@@ -450,7 +436,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		return -EINVAL;
 	}
 	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
-			   ib_chunk->length_dw * 4);
+			   vm, ib_chunk->length_dw * 4);
 	if (r) {
 		DRM_ERROR("Failed to get ib !\n");
 		return r;
@@ -468,7 +454,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
-	r = radeon_vm_bind(rdev, vm);
+	r = radeon_vm_alloc_pt(rdev, vm);
 	if (r) {
 		goto out;
 	}
@@ -477,32 +463,21 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		goto out;
 	}
 	radeon_cs_sync_rings(parser);
-
-	parser->ib.vm_id = vm->id;
-	/* ib pool is bind at 0 in virtual address space,
-	 * so gpu_addr is the offset inside the pool bo
-	 */
-	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+	radeon_cs_sync_to(parser, vm->fence);
+	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
-		parser->const_ib.vm_id = vm->id;
-		/* ib pool is bind at 0 in virtual address space,
-		 * so gpu_addr is the offset inside the pool bo
-		 */
-		parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
 		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
 	} else {
 		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
 	}
 
-out:
 	if (!r) {
-		if (vm->fence) {
-			radeon_fence_unref(&vm->fence);
-		}
-		vm->fence = radeon_fence_ref(parser->ib.fence);
+		radeon_vm_fence(rdev, vm, parser->ib.fence);
 	}
+
+out:
 	mutex_unlock(&vm->mutex);
 	mutex_unlock(&rdev->vm_manager.lock);
 	return r;
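
The new radeon_cs_sync_to() helper centralizes the per-ring fence bookkeeping: for each ring only the most recently emitted fence has to be waited on, so every candidate fence is merged with the one already recorded. The sketch below reproduces that merge in plain C; the fence layout, sequence comparison, and helper names are simplified assumptions, not the kernel API.

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 3

struct fence {
	int ring;
	uint64_t seq;
};

/* keep whichever fence was emitted later (assumes non-wrapping seq) */
static struct fence *fence_later(struct fence *a, struct fence *b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return (a->seq > b->seq) ? a : b;
}

static void sync_to(struct fence *sync[], struct fence *fence)
{
	if (!fence)
		return;
	sync[fence->ring] = fence_later(fence, sync[fence->ring]);
}

int main(void)
{
	struct fence *sync[NUM_RINGS] = { NULL };
	struct fence f1 = { .ring = 0, .seq = 10 };
	struct fence f2 = { .ring = 0, .seq = 12 };
	struct fence f3 = { .ring = 2, .seq = 5 };
	struct fence *relocs[] = { &f1, &f3, &f2 };
	unsigned int i;

	for (i = 0; i < sizeof(relocs) / sizeof(relocs[0]); i++)
		sync_to(sync, relocs[i]);

	for (i = 0; i < NUM_RINGS; i++)
		printf("ring %u: wait for seq %llu\n", i,
		       sync[i] ? (unsigned long long)sync[i]->seq : 0ULL);
	return 0;
}
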
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7a3daebd732d..64a42647f08a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -842,7 +842,7 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
  * Validates certain module parameters and updates
  * the associated values used by the driver (all asics).
  */
-void radeon_check_arguments(struct radeon_device *rdev)
+static void radeon_check_arguments(struct radeon_device *rdev)
 {
 	/* vramlimit must be a power of two */
 	switch (radeon_vram_limit) {
@@ -1013,13 +1013,11 @@ int radeon_device_init(struct radeon_device *rdev,
 	init_rwsem(&rdev->pm.mclk_lock);
 	init_rwsem(&rdev->exclusive_lock);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
-	init_waitqueue_head(&rdev->irq.idle_queue);
 	r = radeon_gem_init(rdev);
 	if (r)
 		return r;
 	/* initialize vm here */
 	mutex_init(&rdev->vm_manager.lock);
-	rdev->vm_manager.use_bitmap = 1;
 	rdev->vm_manager.max_pfn = 1 << 20;
 	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
@@ -1284,6 +1282,13 @@ int radeon_resume_kms(struct drm_device *dev)
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
 		radeon_atom_disp_eng_pll_init(rdev);
+		/* turn on the BL */
+		if (rdev->mode_info.bl_encoder) {
+			u8 bl_level = radeon_get_backlight_level(rdev,
+								 rdev->mode_info.bl_encoder);
+			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+						   bl_level);
+		}
 	}
 	/* reset hpd state */
 	radeon_hpd_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 97f3fe7dd040..07eb84e8a8a4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -63,9 +63,11 @@
  *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
  *   2.21.0 - r600-r700: FMASK and CMASK
  *   2.22.0 - r600 only: RESOLVE_BOX allowed
+ *   2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
+ *   2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	22
+#define KMS_DRIVER_MINOR	24
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 5a1bae3a2426..bd4959ca23aa 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -29,6 +29,14 @@
 #include "radeon.h"
 #include "atom.h"
 
+extern void
+radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+			     struct drm_connector *drm_connector);
+extern void
+radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+			   struct drm_connector *drm_connector);
+
+
 static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -153,6 +161,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
 void
 radeon_link_encoder_connector(struct drm_device *dev)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
 	struct drm_encoder *encoder;
@@ -163,8 +172,16 @@ radeon_link_encoder_connector(struct drm_device *dev)
 		radeon_connector = to_radeon_connector(connector);
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			radeon_encoder = to_radeon_encoder(encoder);
-			if (radeon_encoder->devices & radeon_connector->devices)
+			if (radeon_encoder->devices & radeon_connector->devices) {
 				drm_mode_connector_attach_encoder(connector, encoder);
+				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+					if (rdev->is_atom_bios)
+						radeon_atom_backlight_init(radeon_encoder, connector);
+					else
+						radeon_legacy_backlight_init(radeon_encoder, connector);
+					rdev->mode_info.bl_encoder = radeon_encoder;
+				}
+			}
 		}
 	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fae493710ebf..cc8489d8c6d1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -315,22 +315,6 @@ static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
 	return new_fb;
 }
 
-static char *mode_option;
-int radeon_parse_options(char *options)
-{
-	char *this_opt;
-
-	if (!options || !*options)
-		return 0;
-
-	while ((this_opt = strsep(&options, ",")) != NULL) {
-		if (!*this_opt)
-			continue;
-		mode_option = this_opt;
-	}
-	return 0;
-}
-
 void radeon_fb_output_poll_changed(struct radeon_device *rdev)
 {
 	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5cd47ff03e48..22bd6c2c2740 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -398,7 +398,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	return 0;
 }
 
-bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
 {
 	unsigned i;
 
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 33cc03e310fd..f0c06d196b75 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -423,6 +423,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 
 /**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+	return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+}
+
+/**
  * radeon_vm_manager_init - init the vm manager
  *
  * @rdev: radeon_device pointer
@@ -435,12 +447,15 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 	struct radeon_vm *vm;
 	struct radeon_bo_va *bo_va;
 	int r;
+	unsigned size;
 
 	if (!rdev->vm_manager.enabled) {
-		/* mark first vm as always in use, it's the system one */
 		/* allocate enough for 2 full VM pts */
+		size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+		size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
+		size *= 2;
 		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-					      rdev->vm_manager.max_pfn * 8 * 2,
+					      size,
 					      RADEON_GEM_DOMAIN_VRAM);
 		if (r) {
 			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -448,10 +463,10 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 			return r;
 		}
 
-		r = rdev->vm_manager.funcs->init(rdev);
+		r = radeon_asic_vm_init(rdev);
 		if (r)
 			return r;
-	
+
 		rdev->vm_manager.enabled = true;
 
 		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
@@ -461,73 +476,36 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 
 	/* restore page table */
 	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
-		if (vm->id == -1)
+		if (vm->sa_bo == NULL)
 			continue;
 
 		list_for_each_entry(bo_va, &vm->va, vm_list) {
-			struct ttm_mem_reg *mem = NULL;
-			if (bo_va->valid)
-				mem = &bo_va->bo->tbo.mem;
-
 			bo_va->valid = false;
-			r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
-			if (r) {
-				DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
-			}
-		}
-
-		r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
-		if (r) {
-			DRM_ERROR("Failed to bind vm %d!\n", vm->id);
 		}
 	}
 	return 0;
 }
 
-/* global mutex must be lock */
 /**
- * radeon_vm_unbind_locked - unbind a specific vm
+ * radeon_vm_free_pt - free the page table for a specific vm
  *
  * @rdev: radeon_device pointer
  * @vm: vm to unbind
  *
- * Unbind the requested vm (cayman+).
- * Wait for use of the VM to finish, then unbind the page table,
- * and free the page table memory.
+ * Free the page table of a specific vm (cayman+).
+ *
+ * Global and local mutex must be locked!
  */
-static void radeon_vm_unbind_locked(struct radeon_device *rdev,
+static void radeon_vm_free_pt(struct radeon_device *rdev,
 				    struct radeon_vm *vm)
 {
 	struct radeon_bo_va *bo_va;
 
-	if (vm->id == -1) {
+	if (!vm->sa_bo)
 		return;
-	}
 
-	/* wait for vm use to end */
-	while (vm->fence) {
-		int r;
-		r = radeon_fence_wait(vm->fence, false);
-		if (r)
-			DRM_ERROR("error while waiting for fence: %d\n", r);
-		if (r == -EDEADLK) {
-			mutex_unlock(&rdev->vm_manager.lock);
-			r = radeon_gpu_reset(rdev);
-			mutex_lock(&rdev->vm_manager.lock);
-			if (!r)
-				continue;
-		}
-		break;
-	}
-	radeon_fence_unref(&vm->fence);
-
-	/* hw unbind */
-	rdev->vm_manager.funcs->unbind(rdev, vm);
-	rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
 	list_del_init(&vm->list);
-	vm->id = -1;
-	radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
-	vm->pt = NULL;
+	radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
 
 	list_for_each_entry(bo_va, &vm->va, vm_list) {
 		bo_va->valid = false;
@@ -544,16 +522,22 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
 void radeon_vm_manager_fini(struct radeon_device *rdev)
 {
 	struct radeon_vm *vm, *tmp;
+	int i;
 
 	if (!rdev->vm_manager.enabled)
 		return;
 
 	mutex_lock(&rdev->vm_manager.lock);
-	/* unbind all active vm */
+	/* free all allocated page tables */
 	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
-		radeon_vm_unbind_locked(rdev, vm);
+		mutex_lock(&vm->mutex);
+		radeon_vm_free_pt(rdev, vm);
+		mutex_unlock(&vm->mutex);
 	}
-	rdev->vm_manager.funcs->fini(rdev);
+	for (i = 0; i < RADEON_NUM_VM; ++i) {
+		radeon_fence_unref(&rdev->vm_manager.active[i]);
+	}
+	radeon_asic_vm_fini(rdev);
 	mutex_unlock(&rdev->vm_manager.lock);
 
 	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
@@ -561,46 +545,34 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 	rdev->vm_manager.enabled = false;
 }
 
-/* global mutex must be locked */
 /**
- * radeon_vm_unbind - locked version of unbind
- *
- * @rdev: radeon_device pointer
- * @vm: vm to unbind
- *
- * Locked version that wraps radeon_vm_unbind_locked (cayman+).
- */
-void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	mutex_lock(&vm->mutex);
-	radeon_vm_unbind_locked(rdev, vm);
-	mutex_unlock(&vm->mutex);
-}
-
-/* global and local mutex must be locked */
-/**
- * radeon_vm_bind - bind a page table to a VMID
+ * radeon_vm_alloc_pt - allocates a page table for a VM
  *
  * @rdev: radeon_device pointer
  * @vm: vm to bind
  *
- * Bind the requested vm (cayman+).
- * Suballocate memory for the page table, allocate a VMID
- * and bind the page table to it, and finally start to populate
- * the page table.
+ * Allocate a page table for the requested vm (cayman+).
+ * Also starts to populate the page table.
  * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
  */
-int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	struct radeon_vm *vm_evict;
-	unsigned i;
-	int id = -1, r;
+	int r;
+	u64 *pd_addr;
+	int tables_size;
 
 	if (vm == NULL) {
 		return -EINVAL;
 	}
 
-	if (vm->id != -1) {
+	/* allocate enough to cover the current VM size */
+	tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+	tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
+
+	if (vm->sa_bo != NULL) {
 		/* update lru */
 		list_del_init(&vm->list);
 		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
@@ -609,98 +581,215 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
 
 retry:
 	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
-			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
-			     RADEON_GPU_PAGE_SIZE, false);
-	if (r) {
+			     tables_size, RADEON_GPU_PAGE_SIZE, false);
+	if (r == -ENOMEM) {
 		if (list_empty(&rdev->vm_manager.lru_vm)) {
 			return r;
 		}
 		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
-		radeon_vm_unbind(rdev, vm_evict);
+		mutex_lock(&vm_evict->mutex);
+		radeon_vm_free_pt(rdev, vm_evict);
+		mutex_unlock(&vm_evict->mutex);
 		goto retry;
+
+	} else if (r) {
+		return r;
 	}
-	vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
-	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
-	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
 
-retry_id:
-	/* search for free vm */
-	for (i = 0; i < rdev->vm_manager.nvm; i++) {
-		if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
-			id = i;
-			break;
+	pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
+	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
+	memset(pd_addr, 0, tables_size);
+
+	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+				       &rdev->ring_tmp_bo.bo->tbo.mem);
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+				       struct radeon_vm *vm, int ring)
+{
+	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+	unsigned choices[2] = {};
+	unsigned i;
+
+	/* check if the id is still valid */
+	if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+		return NULL;
+
+	/* we definitely need to flush */
+	radeon_fence_unref(&vm->last_flush);
+
+	/* skip over VMID 0, since it is the system VM */
+	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+		struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+		if (fence == NULL) {
+			/* found a free one */
+			vm->id = i;
+			return NULL;
+		}
+
+		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+			best[fence->ring] = fence;
+			choices[fence->ring == ring ? 0 : 1] = i;
 		}
 	}
-	/* evict vm if necessary */
-	if (id == -1) {
-		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
-		radeon_vm_unbind(rdev, vm_evict);
-		goto retry_id;
+
+	for (i = 0; i < 2; ++i) {
+		if (choices[i]) {
+			vm->id = choices[i];
+			return rdev->vm_manager.active[choices[i]];
+		}
 	}
 
-	/* do hw bind */
-	r = rdev->vm_manager.funcs->bind(rdev, vm, id);
-	if (r) {
-		radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
-		return r;
+	/* should never happen */
+	BUG();
+	return NULL;
+}
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_fence *fence)
+{
+	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search inside the @bo's vm list for the requested vm
+ * Returns the found bo_va or NULL if none is found
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+				       struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		if (bo_va->vm == vm) {
+			return bo_va;
+		}
 	}
-	rdev->vm_manager.use_bitmap |= 1 << id;
-	vm->id = id;
-	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
-				       &rdev->ring_tmp_bo.bo->tbo.mem);
+	return NULL;
 }
 
-/* object have to be reserved */
 /**
  * radeon_vm_bo_add - add a bo to a specific vm
  *
  * @rdev: radeon_device pointer
  * @vm: requested vm
  * @bo: radeon buffer object
- * @offset: requested offset of the buffer in the VM address space
- * @flags: attributes of pages (read/write/valid/etc.)
  *
  * Add @bo into the requested vm (cayman+).
- * Add @bo to the list of bos associated with the vm and validate
- * the offset requested within the vm address space.
- * Returns 0 for success, error for failure.
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
  */
-int radeon_vm_bo_add(struct radeon_device *rdev,
-		     struct radeon_vm *vm,
-		     struct radeon_bo *bo,
-		     uint64_t offset,
-		     uint32_t flags)
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+				      struct radeon_vm *vm,
+				      struct radeon_bo *bo)
 {
-	struct radeon_bo_va *bo_va, *tmp;
-	struct list_head *head;
-	uint64_t size = radeon_bo_size(bo), last_offset = 0;
-	unsigned last_pfn;
+	struct radeon_bo_va *bo_va;
 
 	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
 	if (bo_va == NULL) {
-		return -ENOMEM;
+		return NULL;
 	}
 	bo_va->vm = vm;
 	bo_va->bo = bo;
-	bo_va->soffset = offset;
-	bo_va->eoffset = offset + size;
-	bo_va->flags = flags;
+	bo_va->soffset = 0;
+	bo_va->eoffset = 0;
+	bo_va->flags = 0;
 	bo_va->valid = false;
+	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->bo_list);
 	INIT_LIST_HEAD(&bo_va->vm_list);
-	/* make sure object fit at this offset */
-	if (bo_va->soffset >= bo_va->eoffset) {
-		kfree(bo_va);
-		return -EINVAL;
-	}
 
-	last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
-	if (last_pfn > rdev->vm_manager.max_pfn) {
-		kfree(bo_va);
-		dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
-			last_pfn, rdev->vm_manager.max_pfn);
-		return -EINVAL;
+	mutex_lock(&vm->mutex);
+	list_add(&bo_va->vm_list, &vm->va);
+	list_add_tail(&bo_va->bo_list, &bo->va);
+	mutex_unlock(&vm->mutex);
+
+	return bo_va;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t soffset,
+			  uint32_t flags)
+{
+	uint64_t size = radeon_bo_size(bo_va->bo);
+	uint64_t eoffset, last_offset = 0;
+	struct radeon_vm *vm = bo_va->vm;
+	struct radeon_bo_va *tmp;
+	struct list_head *head;
+	unsigned last_pfn;
+
+	if (soffset) {
+		/* make sure object fit at this offset */
+		eoffset = soffset + size;
+		if (soffset >= eoffset) {
+			return -EINVAL;
+		}
+
+		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+		if (last_pfn > rdev->vm_manager.max_pfn) {
+			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+				last_pfn, rdev->vm_manager.max_pfn);
+			return -EINVAL;
+		}
+
+	} else {
+		eoffset = last_pfn = 0;
 	}
 
 	mutex_lock(&vm->mutex);
@@ -713,7 +802,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 		if (last_pfn > vm->last_pfn) {
 			/* grow va space 32M by 32M */
 			unsigned align = ((32 << 20) >> 12) - 1;
-			radeon_vm_unbind_locked(rdev, vm);
+			radeon_vm_free_pt(rdev, vm);
 			vm->last_pfn = (last_pfn + align) & ~align;
 		}
 		mutex_unlock(&rdev->vm_manager.lock);
@@ -721,68 +810,60 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 	head = &vm->va;
 	last_offset = 0;
 	list_for_each_entry(tmp, &vm->va, vm_list) {
-		if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
+		if (bo_va == tmp) {
+			/* skip over currently modified bo */
+			continue;
+		}
+
+		if (soffset >= last_offset && eoffset <= tmp->soffset) {
 			/* bo can be added before this one */
 			break;
 		}
-		if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
+		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
 			/* bo and tmp overlap, invalid offset */
 			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
-				bo, (unsigned)bo_va->soffset, tmp->bo,
+				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
 				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
-			kfree(bo_va);
 			mutex_unlock(&vm->mutex);
 			return -EINVAL;
 		}
 		last_offset = tmp->eoffset;
 		head = &tmp->vm_list;
 	}
-	list_add(&bo_va->vm_list, head);
-	list_add_tail(&bo_va->bo_list, &bo->va);
+
+	bo_va->soffset = soffset;
+	bo_va->eoffset = eoffset;
+	bo_va->flags = flags;
+	bo_va->valid = false;
+	list_move(&bo_va->vm_list, head);
+
 	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
 /**
- * radeon_vm_get_addr - get the physical address of the page
+ * radeon_vm_map_gart - get the physical address of a gart page
  *
  * @rdev: radeon_device pointer
- * @mem: ttm mem
- * @pfn: pfn
+ * @addr: the unmapped addr
  *
  * Look up the physical address of the page that the pte resolves
  * to (cayman+).
  * Returns the physical address of the page.
  */
-static u64 radeon_vm_get_addr(struct radeon_device *rdev,
-			      struct ttm_mem_reg *mem,
-			      unsigned pfn)
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
 {
-	u64 addr = 0;
-
-	switch (mem->mem_type) {
-	case TTM_PL_VRAM:
-		addr = (mem->start << PAGE_SHIFT);
-		addr += pfn * RADEON_GPU_PAGE_SIZE;
-		addr += rdev->vm_manager.vram_base_offset;
-		break;
-	case TTM_PL_TT:
-		/* offset inside page table */
-		addr = mem->start << PAGE_SHIFT;
-		addr += pfn * RADEON_GPU_PAGE_SIZE;
-		addr = addr >> PAGE_SHIFT;
-		/* page table offset */
-		addr = rdev->gart.pages_addr[addr];
-		/* in case cpu page size != gpu page size*/
-		addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
-		break;
-	default:
-		break;
-	}
-	return addr;
+	uint64_t result;
+
+	/* page table offset */
+	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+	/* in case cpu page size != gpu page size*/
+	result |= addr & (~PAGE_MASK);
+
+	return result;
 }
 
-/* object have to be reserved & global and local mutex must be locked */
 /**
  * radeon_vm_bo_update_pte - map a bo into the vm page table
  *
@@ -793,103 +874,160 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
  *
  * Fill in the page table entries for @bo (cayman+).
  * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object have to be reserved & global and local mutex must be locked!
  */
 int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 			    struct radeon_vm *vm,
 			    struct radeon_bo *bo,
 			    struct ttm_mem_reg *mem)
 {
+	unsigned ridx = rdev->asic->vm.pt_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	struct radeon_semaphore *sem = NULL;
 	struct radeon_bo_va *bo_va;
-	unsigned ngpu_pages, i;
-	uint64_t addr = 0, pfn;
-	uint32_t flags;
+	unsigned nptes, npdes, ndw;
+	uint64_t pe, addr;
+	uint64_t pfn;
+	int r;
 
 	/* nothing to do if vm isn't bound */
-	if (vm->id == -1)
+	if (vm->sa_bo == NULL)
 		return 0;
 
-	bo_va = radeon_bo_va(bo, vm);
+	bo_va = radeon_vm_bo_find(vm, bo);
 	if (bo_va == NULL) {
 		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
 		return -EINVAL;
 	}
 
-	if (bo_va->valid && mem)
+	if (!bo_va->soffset) {
+		dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
+			bo, vm);
+		return -EINVAL;
+	}
+
+	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
 		return 0;
 
-	ngpu_pages = radeon_bo_ngpu_pages(bo);
 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
 	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
 	if (mem) {
+		addr = mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_SYSTEM) {
 			bo_va->flags |= RADEON_VM_PAGE_VALID;
 			bo_va->valid = true;
 		}
 		if (mem->mem_type == TTM_PL_TT) {
 			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+		} else {
+			addr += rdev->vm_manager.vram_base_offset;
 		}
+	} else {
+		addr = 0;
+		bo_va->valid = false;
 	}
-	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
-	flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
-	for (i = 0, addr = 0; i < ngpu_pages; i++) {
-		if (mem && bo_va->valid) {
-			addr = radeon_vm_get_addr(rdev, mem, i);
+
+	if (vm->fence && radeon_fence_signaled(vm->fence)) {
+		radeon_fence_unref(&vm->fence);
+	}
+
+	if (vm->fence && vm->fence->ring != ridx) {
+		r = radeon_semaphore_create(rdev, &sem);
+		if (r) {
+			return r;
 		}
-		rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
 	}
-	rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
+
+	/* estimate number of dw needed */
+	/* reserve space for 32-bit padding */
+	ndw = 32;
+
+	nptes = radeon_bo_ngpu_pages(bo);
+
+	pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
+
+	/* handle cases where a bo spans several pdes  */
+	npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
+		 (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
+
+	/* reserve space for one header for every 2k dwords */
+	ndw += (nptes >> 11) * 3;
+	/* reserve space for pte addresses */
+	ndw += nptes * 2;
+
+	/* reserve space for one header for every 2k dwords */
+	ndw += (npdes >> 11) * 3;
+	/* reserve space for pde addresses */
+	ndw += npdes * 2;
+
+	r = radeon_ring_lock(rdev, ring, ndw);
+	if (r) {
+		return r;
+	}
+
+	if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
+		radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
+		radeon_fence_note_sync(vm->fence, ridx);
+	}
+
+	/* update page table entries */
+	pe = vm->pd_gpu_addr;
+	pe += radeon_vm_directory_size(rdev);
+	pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
+
+	radeon_asic_vm_set_page(rdev, pe, addr, nptes,
+				RADEON_GPU_PAGE_SIZE, bo_va->flags);
+
+	/* update page directory entries */
+	addr = pe;
+
+	pe = vm->pd_gpu_addr;
+	pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+
+	radeon_asic_vm_set_page(rdev, pe, addr, npdes,
+				RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
+
+	radeon_fence_unref(&vm->fence);
+	r = radeon_fence_emit(rdev, &vm->fence, ridx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, vm->fence);
+	radeon_fence_unref(&vm->last_flush);
 	return 0;
 }
 
-/* object have to be reserved */
 /**
  * radeon_vm_bo_rmv - remove a bo from a specific vm
  *
  * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
+ * @bo_va: requested bo_va
  *
- * Remove @bo from the requested vm (cayman+).
- * Remove @bo from the list of bos associated with the vm and
- * remove the ptes for @bo in the page table.
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
  * Returns 0 for success.
+ *
+ * Object has to be reserved!
  */
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
-		     struct radeon_vm *vm,
-		     struct radeon_bo *bo)
+		     struct radeon_bo_va *bo_va)
 {
-	struct radeon_bo_va *bo_va;
 	int r;
 
-	bo_va = radeon_bo_va(bo, vm);
-	if (bo_va == NULL)
-		return 0;
-
-	/* wait for va use to end */
-	while (bo_va->fence) {
-		r = radeon_fence_wait(bo_va->fence, false);
-		if (r) {
-			DRM_ERROR("error while waiting for fence: %d\n", r);
-		}
-		if (r == -EDEADLK) {
-			r = radeon_gpu_reset(rdev);
-			if (!r)
-				continue;
-		}
-		break;
-	}
-	radeon_fence_unref(&bo_va->fence);
-
 	mutex_lock(&rdev->vm_manager.lock);
-	mutex_lock(&vm->mutex);
-	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+	mutex_lock(&bo_va->vm->mutex);
+	r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
 	mutex_unlock(&rdev->vm_manager.lock);
 	list_del(&bo_va->vm_list);
-	mutex_unlock(&vm->mutex);
+	mutex_unlock(&bo_va->vm->mutex);
 	list_del(&bo_va->bo_list);
 
 	kfree(bo_va);
-	return 0;
+	return r;
 }
 
 /**
@@ -925,27 +1063,23 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
  */
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
+	struct radeon_bo_va *bo_va;
 	int r;
 
-	vm->id = -1;
+	vm->id = 0;
 	vm->fence = NULL;
+	vm->last_pfn = 0;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-	/* SI requires equal sized PTs for all VMs, so always set
-	 * last_pfn to max_pfn.  cayman allows variable sized
-	 * pts so we can grow then as needed.  Once we switch
-	 * to two level pts we can unify this again.
-	 */
-	if (rdev->family >= CHIP_TAHITI)
-		vm->last_pfn = rdev->vm_manager.max_pfn;
-	else
-		vm->last_pfn = 0;
+
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
 	 */
-	r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
-			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+	bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
+	r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+				  RADEON_VM_PAGE_READABLE |
+				  RADEON_VM_PAGE_SNOOPED);
 	return r;
 }
 
@@ -965,7 +1099,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
-	radeon_vm_unbind_locked(rdev, vm);
+	radeon_vm_free_pt(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
 	/* remove all bo at this point non are busy any more because unbind
@@ -973,10 +1107,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	 */
 	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 	if (!r) {
-		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
+		bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
 		list_del_init(&bo_va->bo_list);
 		list_del_init(&bo_va->vm_list);
-		radeon_fence_unref(&bo_va->fence);
 		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		kfree(bo_va);
 	}
@@ -988,10 +1121,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 		r = radeon_bo_reserve(bo_va->bo, false);
 		if (!r) {
 			list_del_init(&bo_va->bo_list);
-			radeon_fence_unref(&bo_va->fence);
 			radeon_bo_unreserve(bo_va->bo);
 			kfree(bo_va);
 		}
 	}
+	radeon_fence_unref(&vm->fence);
+	radeon_fence_unref(&vm->last_flush);
 	mutex_unlock(&vm->mutex);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 04c212da6f65..f38fbcc46935 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -123,6 +123,30 @@ void radeon_gem_fini(struct radeon_device *rdev)
  */
 int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
+	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+	struct radeon_device *rdev = rbo->rdev;
+	struct radeon_fpriv *fpriv = file_priv->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+
+	if (rdev->family < CHIP_CAYMAN) {
+		return 0;
+	}
+
+	r = radeon_bo_reserve(rbo, false);
+	if (r) {
+		return r;
+	}
+
+	bo_va = radeon_vm_bo_find(vm, rbo);
+	if (!bo_va) {
+		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
+	} else {
+		++bo_va->ref_count;
+	}
+	radeon_bo_unreserve(rbo);
+
 	return 0;
 }
 
@@ -133,16 +157,25 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	struct radeon_device *rdev = rbo->rdev;
 	struct radeon_fpriv *fpriv = file_priv->driver_priv;
 	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va;
+	int r;
 
 	if (rdev->family < CHIP_CAYMAN) {
 		return;
 	}
 
-	if (radeon_bo_reserve(rbo, false)) {
-		dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
+	r = radeon_bo_reserve(rbo, true);
+	if (r) {
+		dev_err(rdev->dev,
+			"leaking bo va because we failed to reserve the bo (%d)\n", r);
 		return;
 	}
-	radeon_vm_bo_rmv(rdev, vm, rbo);
+	bo_va = radeon_vm_bo_find(vm, rbo);
+	if (bo_va) {
+		if (--bo_va->ref_count == 0) {
+			radeon_vm_bo_rmv(rdev, bo_va);
+		}
+	}
 	radeon_bo_unreserve(rbo);
 }
 
@@ -458,19 +491,24 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
+	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
+	if (!bo_va) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		drm_gem_object_unreference_unlocked(gobj);
+		return -ENOENT;
+	}
+
 	switch (args->operation) {
 	case RADEON_VA_MAP:
-		bo_va = radeon_bo_va(rbo, &fpriv->vm);
-		if (bo_va) {
+		if (bo_va->soffset) {
 			args->operation = RADEON_VA_RESULT_VA_EXIST;
 			args->offset = bo_va->soffset;
 			goto out;
 		}
-		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
-				     args->offset, args->flags);
+		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
 		break;
 	case RADEON_VA_UNMAP:
-		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
+		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index 8fc81a26438a..c180df8e84db 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
 #define compat_radeon_cp_setparam NULL
 #endif /* X86_64 || IA64 */
 
-drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
 	[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
 	[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
 	[DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9201992cee12..90374dd77960 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -99,7 +99,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 	/* Disable *all* interrupts */
 	for (i = 0; i < RADEON_NUM_RINGS; i++)
 		atomic_set(&rdev->irq.ring_int[i], 0);
-	rdev->irq.gui_idle = false;
 	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
 		rdev->irq.hpd[i] = false;
 	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -147,7 +146,6 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
 	/* Disable *all* interrupts */
 	for (i = 0; i < RADEON_NUM_RINGS; i++)
 		atomic_set(&rdev->irq.ring_int[i], 0);
-	rdev->irq.gui_idle = false;
 	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
 		rdev->irq.hpd[i] = false;
 	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -204,6 +202,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
 	    (rdev->pdev->subsystem_device == 0x01fd))
 		return true;
 
+	/* Gateway RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x107b) &&
+	    (rdev->pdev->subsystem_device == 0x0185))
+		return true;
+
+	/* try and enable MSIs by default on all RS690s */
+	if (rdev->family == CHIP_RS690)
+		return true;
+
 	/* RV515 seems to have MSI issues where it loses
 	 * MSI rearms occasionally. This leads to lockups and freezes.
 	 * disable it by default.
@@ -457,34 +465,3 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 }
 
-/**
- * radeon_irq_kms_wait_gui_idle - waits for drawing engine to be idle
- *
- * @rdev: radeon device pointer
- *
- * Enabled the GUI idle interrupt and waits for it to fire (r6xx+).
- * This is currently used to make sure the 3D engine is idle for power
- * management, but should be replaces with proper fence waits.
- * GUI idle interrupts don't work very well on pre-r6xx hw and it also
- * does not take into account other aspects of the chip that may be busy.
- * DO NOT USE GOING FORWARD.
- */
-int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
-{
-	unsigned long irqflags;
-	int r;
-
-	spin_lock_irqsave(&rdev->irq.lock, irqflags);
-	rdev->irq.gui_idle = true;
-	radeon_irq_set(rdev);
-	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
-
-	r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
-			       msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
-
-	spin_lock_irqsave(&rdev->irq.lock, irqflags);
-	rdev->irq.gui_idle = false;
-	radeon_irq_set(rdev);
-	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
-	return r;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8a7f87f17c13..83b8d8aa71c0 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -50,6 +50,7 @@ int radeon_driver_unload_kms(struct drm_device *dev)
 
 	if (rdev == NULL)
 		return 0;
+	radeon_acpi_fini(rdev);
 	radeon_modeset_fini(rdev);
 	radeon_device_fini(rdev);
 	kfree(rdev);
@@ -102,11 +103,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		goto out;
 	}
 
-	/* Call ACPI methods */
-	acpi_status = radeon_acpi_init(rdev);
-	if (acpi_status)
-		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
-
 	/* Again modeset_init should fail only on fatal error
 	 * otherwise it should provide enough functionalities
 	 * for shadowfb to run
@@ -114,6 +110,17 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	r = radeon_modeset_init(rdev);
 	if (r)
 		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+
+	/* Call ACPI methods: requires modeset init
+	 * but failure is not fatal
+	 */
+	if (!r) {
+		acpi_status = radeon_acpi_init(rdev);
+		if (acpi_status)
+			dev_dbg(&dev->pdev->dev,
+				"Error during ACPI methods call\n");
+	}
+
 out:
 	if (r)
 		radeon_driver_unload_kms(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 94b4a1c12893..5677a424b585 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -206,11 +206,6 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
 	WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
 }
 
-void radeon_restore_common_regs(struct drm_device *dev)
-{
-	/* don't need this yet */
-}
-
 static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
@@ -295,7 +290,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
 		return 1;
 }
 
-void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 757b08f07195..92487e614778 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -271,13 +271,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
 
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
-#define MAX_RADEON_LEVEL 0xFF
-
-struct radeon_backlight_privdata {
-	struct radeon_encoder *encoder;
-	uint8_t negative;
-};
-
 static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
 {
 	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -286,21 +279,33 @@ static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
 	/* Convert brightness to hardware level */
 	if (bd->props.brightness < 0)
 		level = 0;
-	else if (bd->props.brightness > MAX_RADEON_LEVEL)
-		level = MAX_RADEON_LEVEL;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
 	else
 		level = bd->props.brightness;
 
 	if (pdata->negative)
-		level = MAX_RADEON_LEVEL - level;
+		level = RADEON_MAX_BL_LEVEL - level;
 
 	return level;
 }
 
-static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+u8
+radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u8 backlight_level;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	return backlight_level;
+}
+
+void
+radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
 {
-	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
-	struct radeon_encoder *radeon_encoder = pdata->encoder;
 	struct drm_device *dev = radeon_encoder->base.dev;
 	struct radeon_device *rdev = dev->dev_private;
 	int dpms_mode = DRM_MODE_DPMS_ON;
@@ -308,19 +313,31 @@ static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
 	if (radeon_encoder->enc_priv) {
 		if (rdev->is_atom_bios) {
 			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
-			dpms_mode = lvds->dpms_mode;
-			lvds->backlight_level = radeon_legacy_lvds_level(bd);
+			if (lvds->backlight_level > 0)
+				dpms_mode = lvds->dpms_mode;
+			else
+				dpms_mode = DRM_MODE_DPMS_OFF;
+			lvds->backlight_level = level;
 		} else {
 			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
-			dpms_mode = lvds->dpms_mode;
-			lvds->backlight_level = radeon_legacy_lvds_level(bd);
+			if (lvds->backlight_level > 0)
+				dpms_mode = lvds->dpms_mode;
+			else
+				dpms_mode = DRM_MODE_DPMS_OFF;
+			lvds->backlight_level = level;
 		}
 	}
 
-	if (bd->props.brightness > 0)
-		radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
-	else
-		radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
+	radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
+}
+
+static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	radeon_legacy_set_backlight_level(radeon_encoder,
+					  radeon_legacy_lvds_level(bd));
 
 	return 0;
 }
@@ -336,7 +353,7 @@ static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
 	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
 			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
 
-	return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
+	return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
 }
 
 static const struct backlight_ops radeon_backlight_ops = {
@@ -370,7 +387,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
 	}
 
 	memset(&props, 0, sizeof(props));
-	props.max_brightness = MAX_RADEON_LEVEL;
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
 	props.type = BACKLIGHT_RAW;
 	bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
 				       pdata, &radeon_backlight_ops, &props);
@@ -449,7 +466,7 @@ static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
 	}
 
 	if (bd) {
-		struct radeon_legacy_backlight_privdata *pdata;
+		struct radeon_backlight_privdata *pdata;
 
 		pdata = bl_get_data(bd);
 		backlight_device_unregister(bd);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0c28ca3964b1..92c5f473cf08 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -251,8 +251,23 @@ struct radeon_mode_info {
 
 	/* pointer to fbdev info structure */
 	struct radeon_fbdev *rfbdev;
+	/* firmware flags */
+	u16 firmware_flags;
+	/* pointer to backlight encoder */
+	struct radeon_encoder *bl_encoder;
 };
 
+#define RADEON_MAX_BL_LEVEL 0xFF
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+struct radeon_backlight_privdata {
+	struct radeon_encoder *encoder;
+	uint8_t negative;
+};
+
+#endif
+
 #define MAX_H_CODE_TIMING_LEN 32
 #define MAX_V_CODE_TIMING_LEN 32
 
@@ -268,6 +283,18 @@ struct radeon_tv_regs {
 	uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
 };
 
+struct radeon_atom_ss {
+	uint16_t percentage;
+	uint8_t type;
+	uint16_t step;
+	uint8_t delay;
+	uint8_t range;
+	uint8_t refdiv;
+	/* asic_ss */
+	uint16_t rate;
+	uint16_t amount;
+};
+
 struct radeon_crtc {
 	struct drm_crtc base;
 	int crtc_id;
@@ -292,6 +319,16 @@ struct radeon_crtc {
 	/* page flipping */
 	struct radeon_unpin_work *unpin_work;
 	int deferred_flip_completion;
+	/* pll sharing */
+	struct radeon_atom_ss ss;
+	bool ss_enabled;
+	u32 adjusted_clock;
+	int bpc;
+	u32 pll_reference_div;
+	u32 pll_post_div;
+	u32 pll_flags;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
 };
 
 struct radeon_encoder_primary_dac {
@@ -345,18 +382,6 @@ struct radeon_encoder_ext_tmds {
 };
 
 /* spread spectrum */
-struct radeon_atom_ss {
-	uint16_t percentage;
-	uint8_t type;
-	uint16_t step;
-	uint8_t delay;
-	uint8_t range;
-	uint8_t refdiv;
-	/* asic_ss */
-	uint16_t rate;
-	uint16_t amount;
-};
-
 struct radeon_encoder_atom_dig {
 	bool linkb;
 	/* atom dig */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 56ed724b398d..8b27dd6e3144 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,7 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
 
 	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
 		/* remove from all vm address space */
-		radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
+		radeon_vm_bo_rmv(bo->rdev, bo_va);
 	}
 }
 
@@ -627,18 +627,17 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 /**
  * radeon_bo_reserve - reserve bo
  * @bo:		bo structure
- * @no_wait:		don't sleep while trying to reserve (return -EBUSY)
+ * @no_intr:	don't return -ERESTARTSYS on pending signal
  *
  * Returns:
- * -EBUSY: buffer is busy and @no_wait is true
  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
  * a signal. Release all buffer reservations and return to user-space.
  */
-int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
@@ -646,16 +645,3 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
 	}
 	return 0;
 }
-
-/* object have to be reserved */
-struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
-{
-	struct radeon_bo_va *bo_va;
-
-	list_for_each_entry(bo_va, &rbo->va, bo_list) {
-		if (bo_va->vm == vm) {
-			return bo_va;
-		}
-	}
-	return NULL;
-}
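
radeon_bo_reserve() is re-documented above with the new no_intr argument. A minimal sketch of the resulting reserve/unreserve pattern, assuming the driver's existing helpers and following radeon_gem_object_open() earlier in this diff (the function name is illustrative only):

/* Sketch only: reserve a bo with an interruptible wait, use it, release it. */
static int example_touch_bo(struct radeon_bo *rbo)
{
	int r;

	r = radeon_bo_reserve(rbo, false);	/* false: wait may be interrupted */
	if (r)
		return r;			/* may be -ERESTARTSYS; hand back to user space */

	/* ... operate on the reserved bo, e.g. look up or add a bo_va ... */

	radeon_bo_unreserve(rbo);
	return 0;
}

Passing true instead makes the wait non-interruptible, which is what radeon_gem_object_close() does above so that a pending signal cannot force the "leaking bo va" path.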
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 17fb99f177cf..93cd491fff2e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -52,7 +52,7 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
 	return 0;
 }
 
-int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait);
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
 
 static inline void radeon_bo_unreserve(struct radeon_bo *bo)
 {
@@ -141,8 +141,6 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 					struct ttm_mem_reg *mem);
 extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
-extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
-					 struct radeon_vm *vm);
 
 /*
  * sub allocation
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3ef0319981d3..aa14dbb7e4fb 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -24,9 +24,6 @@
 #include "radeon.h"
 #include "avivod.h"
 #include "atom.h"
-#ifdef CONFIG_ACPI
-#include <linux/acpi.h>
-#endif
 #include <linux/power_supply.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
@@ -36,7 +33,7 @@
 #define RADEON_WAIT_VBLANK_TIMEOUT 200
 
 static const char *radeon_pm_state_type_name[5] = {
-	"Default",
+	"",
 	"Powersave",
 	"Battery",
 	"Balanced",
@@ -50,8 +47,6 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
 static void radeon_pm_update_profile(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
 
-#define ACPI_AC_CLASS           "ac_adapter"
-
 int radeon_pm_get_type_index(struct radeon_device *rdev,
 			     enum radeon_pm_state_type ps_type,
 			     int instance)
@@ -70,33 +65,17 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
 	return rdev->pm.default_power_state_index;
 }
 
-#ifdef CONFIG_ACPI
-static int radeon_acpi_event(struct notifier_block *nb,
-			     unsigned long val,
-			     void *data)
+void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
 {
-	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
-	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
-
-	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
-		if (power_supply_is_system_supplied() > 0)
-			DRM_DEBUG_DRIVER("pm: AC\n");
-		else
-			DRM_DEBUG_DRIVER("pm: DC\n");
-
-		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
-			if (rdev->pm.profile == PM_PROFILE_AUTO) {
-				mutex_lock(&rdev->pm.mutex);
-				radeon_pm_update_profile(rdev);
-				radeon_pm_set_clocks(rdev);
-				mutex_unlock(&rdev->pm.mutex);
-			}
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		if (rdev->pm.profile == PM_PROFILE_AUTO) {
+			mutex_lock(&rdev->pm.mutex);
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+			mutex_unlock(&rdev->pm.mutex);
 		}
 	}
-
-	return NOTIFY_OK;
 }
-#endif
 
 static void radeon_pm_update_profile(struct radeon_device *rdev)
 {
@@ -188,8 +167,21 @@ static void radeon_set_power_state(struct radeon_device *rdev)
 		if (sclk > rdev->pm.default_sclk)
 			sclk = rdev->pm.default_sclk;
 
-		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
-			clock_info[rdev->pm.requested_clock_mode_index].mclk;
+		/* starting with BTC, there is one state that is used for both
+		 * MH and SH.  The difference is that we always use the high
+		 * clock index for mclk.
+		 */
+		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+		    (rdev->family >= CHIP_BARTS) &&
+		    rdev->pm.active_crtc_count &&
+		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
+		else
+			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+				clock_info[rdev->pm.requested_clock_mode_index].mclk;
+
 		if (mclk > rdev->pm.default_mclk)
 			mclk = rdev->pm.default_mclk;
 
@@ -253,18 +245,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
-	/* gui idle int has issues on older chips it seems */
-	if (rdev->family >= CHIP_R600) {
-		if (rdev->irq.installed) {
-			/* wait for GPU to become idle */
-			radeon_irq_kms_wait_gui_idle(rdev);
-		}
-	} else {
-		struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-		if (ring->ready) {
-			radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
-		}
+	/* wait for the rings to drain */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		struct radeon_ring *ring = &rdev->ring[i];
+		if (ring->ready)
+			radeon_fence_wait_empty_locked(rdev, i);
 	}
+
 	radeon_unmap_vram_bos(rdev);
 
 	if (rdev->irq.installed) {
@@ -320,17 +307,15 @@ static void radeon_pm_print_states(struct radeon_device *rdev)
 		for (j = 0; j < power_state->num_clock_modes; j++) {
 			clock_info = &(power_state->clock_info[j]);
 			if (rdev->flags & RADEON_IS_IGP)
-				DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
-					j,
-					clock_info->sclk * 10,
-					clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
+						 j,
+						 clock_info->sclk * 10);
 			else
-				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
-					j,
-					clock_info->sclk * 10,
-					clock_info->mclk * 10,
-					clock_info->voltage.voltage,
-					clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
+				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
+						 j,
+						 clock_info->sclk * 10,
+						 clock_info->mclk * 10,
+						 clock_info->voltage.voltage);
 		}
 	}
 }
@@ -547,7 +532,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
 void radeon_pm_resume(struct radeon_device *rdev)
 {
 	/* set up the default clocks if the MC ucode is loaded */
-	if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+	if ((rdev->family >= CHIP_BARTS) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
+	    rdev->mc_fw) {
 		if (rdev->pm.default_vddc)
 			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
 						SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -602,7 +589,9 @@ int radeon_pm_init(struct radeon_device *rdev)
 		radeon_pm_print_states(rdev);
 		radeon_pm_init_profile(rdev);
 		/* set up the default clocks if the MC ucode is loaded */
-		if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+		if ((rdev->family >= CHIP_BARTS) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
+		    rdev->mc_fw) {
 			if (rdev->pm.default_vddc)
 				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
 							SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -632,10 +621,6 @@ int radeon_pm_init(struct radeon_device *rdev)
 		if (ret)
 			DRM_ERROR("failed to create device file for power method\n");
 
-#ifdef CONFIG_ACPI
-		rdev->acpi_nb.notifier_call = radeon_acpi_event;
-		register_acpi_notifier(&rdev->acpi_nb);
-#endif
 		if (radeon_debugfs_pm_init(rdev)) {
 			DRM_ERROR("Failed to register debugfs file for PM!\n");
 		}
@@ -666,9 +651,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
 
 		device_remove_file(rdev->dev, &dev_attr_power_profile);
 		device_remove_file(rdev->dev, &dev_attr_power_method);
-#ifdef CONFIG_ACPI
-		unregister_acpi_notifier(&rdev->acpi_nb);
-#endif
 	}
 
 	if (rdev->pm.power_state)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index fc209c8b8666..bba66902c83b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -43,7 +43,7 @@
  * produce command buffers which are sent to the kernel and
  * put in IBs for execution by the requested ring.
  */
-int radeon_debugfs_sa_init(struct radeon_device *rdev);
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
 /**
  * radeon_ib_get - request an IB (Indirect Buffer)
@@ -58,7 +58,8 @@ int radeon_debugfs_sa_init(struct radeon_device *rdev);
  * Returns 0 on success, error on failure.
  */
 int radeon_ib_get(struct radeon_device *rdev, int ring,
-		  struct radeon_ib *ib, unsigned size)
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size)
 {
 	int i, r;
 
@@ -76,8 +77,15 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 	ib->ring = ring;
 	ib->fence = NULL;
 	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
-	ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
-	ib->vm_id = 0;
+	ib->vm = vm;
+	if (vm) {
+		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+		 * space and soffset is the offset inside the pool bo
+		 */
+		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+	} else {
+		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+	}
 	ib->is_const_ib = false;
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		ib->sync_to[i] = NULL;
@@ -152,6 +160,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	if (!need_sync) {
 		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
 	}
+	/* if we can't remember our last VM flush then flush now! */
+	if (ib->vm && !ib->vm->last_flush) {
+		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
+	}
 	if (const_ib) {
 		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
 		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -166,6 +178,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	if (const_ib) {
 		const_ib->fence = radeon_fence_ref(ib->fence);
 	}
+	/* we just flushed the VM, remember that */
+	if (ib->vm && !ib->vm->last_flush) {
+		ib->vm->last_flush = radeon_fence_ref(ib->fence);
+	}
 	radeon_ring_unlock_commit(rdev, ring);
 	return 0;
 }
@@ -275,7 +291,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
  * wptr.  The GPU then starts fetching commands and executes
  * them until the pointers are equal again.
  */
-int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
 
 /**
  * radeon_ring_write - write a value to the ring
@@ -803,7 +819,7 @@ static struct drm_info_list radeon_debugfs_sa_list[] = {
 
 #endif
 
-int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 #if defined(CONFIG_DEBUG_FS)
 	unsigned i;
@@ -823,7 +839,7 @@ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *rin
 	return 0;
 }
 
-int radeon_debugfs_sa_init(struct radeon_device *rdev)
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 83e889b9420b..cb800995d4f9 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -315,7 +315,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 {
 	struct radeon_fence *fences[RADEON_NUM_RINGS];
 	unsigned tries[RADEON_NUM_RINGS];
-	int i, r = -ENOMEM;
+	int i, r;
 
 	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
 	BUG_ON(size > sa_manager->size);
@@ -330,7 +330,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 	INIT_LIST_HEAD(&(*sa_bo)->flist);
 
 	spin_lock(&sa_manager->wq.lock);
-	while(1) {
+	do {
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 			fences[i] = NULL;
 			tries[i] = 0;
@@ -348,26 +348,22 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 			/* see if we can skip over some allocations */
 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
-		if (!block) {
-			break;
-		}
-
 		spin_unlock(&sa_manager->wq.lock);
 		r = radeon_fence_wait_any(rdev, fences, false);
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for, block */
-		if (r == -ENOENT) {
+		if (r == -ENOENT && block) {
 			r = wait_event_interruptible_locked(
 				sa_manager->wq, 
 				radeon_sa_event(sa_manager, size, align)
 			);
+
+		} else if (r == -ENOENT) {
+			r = -ENOMEM;
 		}
-		if (r) {
-			goto out_err;
-		}
-	};
 
-out_err:
+	} while (!r);
+
 	spin_unlock(&sa_manager->wq.lock);
 	kfree(*sa_bo);
 	*sa_bo = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 7c16540c10ff..587c09a00ba2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -313,7 +313,7 @@ out_cleanup:
 		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
 }
 
-void radeon_test_ring_sync2(struct radeon_device *rdev,
+static void radeon_test_ring_sync2(struct radeon_device *rdev,
 			    struct radeon_ring *ringA,
 			    struct radeon_ring *ringB,
 			    struct radeon_ring *ringC)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5b71c716d83f..5ebe1b3e5db2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -549,7 +549,7 @@ static struct ttm_backend_func radeon_backend_func = {
 	.destroy = &radeon_ttm_backend_destroy,
 };
 
-struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
 				    unsigned long size, uint32_t page_flags,
 				    struct page *dummy_read_page)
 {
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 2752f7f78237..73051ce3121e 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -242,7 +242,7 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
 	return -1;
 }
 
-void rs400_gpu_init(struct radeon_device *rdev)
+static void rs400_gpu_init(struct radeon_device *rdev)
 {
 	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
@@ -252,7 +252,7 @@ void rs400_gpu_init(struct radeon_device *rdev)
 	}
 }
 
-void rs400_mc_init(struct radeon_device *rdev)
+static void rs400_mc_init(struct radeon_device *rdev)
 {
 	u64 base;
 
@@ -370,7 +370,7 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
 #endif
 }
 
-void rs400_mc_program(struct radeon_device *rdev)
+static void rs400_mc_program(struct radeon_device *rdev)
 {
 	struct r100_mc_save save;
 
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6590cc128f36..5a0fc74c2ba6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -43,22 +43,30 @@
 
 #include "rs600_reg_safe.h"
 
-void rs600_gpu_init(struct radeon_device *rdev);
+static void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
 void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
 	int i;
 
-	if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
 		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
+			if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
 				break;
 			udelay(1);
 		}
 		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
+			if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
 				break;
 			udelay(1);
 		}
@@ -424,7 +432,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
 	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
 }
 
-int rs600_gart_init(struct radeon_device *rdev)
+static int rs600_gart_init(struct radeon_device *rdev)
 {
 	int r;
 
@@ -506,7 +514,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void rs600_gart_disable(struct radeon_device *rdev)
+static void rs600_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
 
@@ -517,7 +525,7 @@ void rs600_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void rs600_gart_fini(struct radeon_device *rdev)
+static void rs600_gart_fini(struct radeon_device *rdev)
 {
 	radeon_gart_fini(rdev);
 	rs600_gart_disable(rdev);
@@ -567,9 +575,6 @@ int rs600_irq_set(struct radeon_device *rdev)
 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 		tmp |= S_000040_SW_INT_EN(1);
 	}
-	if (rdev->irq.gui_idle) {
-		tmp |= S_000040_GUI_IDLE(1);
-	}
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
@@ -602,12 +607,6 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
 	uint32_t irq_mask = S_000044_SW_INT(1);
 	u32 tmp;
 
-	/* the interrupt works, but the status bit is permanently asserted */
-	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
-		if (!rdev->irq.gui_idle_acked)
-			irq_mask |= S_000044_GUI_IDLE_STAT(1);
-	}
-
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
 		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -667,9 +666,6 @@ int rs600_irq_process(struct radeon_device *rdev)
 	bool queue_hotplug = false;
 	bool queue_hdmi = false;
 
-	/* reset gui idle ack.  the status bit is broken */
-	rdev->irq.gui_idle_acked = false;
-
 	status = rs600_irq_ack(rdev);
 	if (!status &&
 	    !rdev->irq.stat_regs.r500.disp_int &&
@@ -683,11 +679,6 @@ int rs600_irq_process(struct radeon_device *rdev)
 		if (G_000044_SW_INT(status)) {
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		}
-		/* GUI idle */
-		if (G_000040_GUI_IDLE(status)) {
-			rdev->irq.gui_idle_acked = true;
-			wake_up(&rdev->irq.idle_queue);
-		}
 		/* Vertical blank interrupts */
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			if (rdev->irq.crtc_vblank_int[0]) {
@@ -721,8 +712,6 @@ int rs600_irq_process(struct radeon_device *rdev)
 		}
 		status = rs600_irq_ack(rdev);
 	}
-	/* reset gui idle ack.  the status bit is broken */
-	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	if (queue_hdmi)
@@ -764,7 +753,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
 	return -1;
 }
 
-void rs600_gpu_init(struct radeon_device *rdev)
+static void rs600_gpu_init(struct radeon_device *rdev)
 {
 	r420_pipes_init(rdev);
 	/* Wait for mc idle */
@@ -772,7 +761,7 @@ void rs600_gpu_init(struct radeon_device *rdev)
 		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
 }
 
-void rs600_mc_init(struct radeon_device *rdev)
+static void rs600_mc_init(struct radeon_device *rdev)
 {
 	u64 base;
 
@@ -834,7 +823,7 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 	WREG32(R_000074_MC_IND_DATA, v);
 }
 
-void rs600_debugfs(struct radeon_device *rdev)
+static void rs600_debugfs(struct radeon_device *rdev)
 {
 	if (r100_debugfs_rbbm_init(rdev))
 		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index dfb9f0fe6f38..5706d2ac75ab 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -145,7 +145,7 @@ void rs690_pm_info(struct radeon_device *rdev)
 	rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
 }
 
-void rs690_mc_init(struct radeon_device *rdev)
+static void rs690_mc_init(struct radeon_device *rdev)
 {
 	u64 base;
 
@@ -224,7 +224,7 @@ struct rs690_watermark {
 	fixed20_12 sclk;
 };
 
-void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 				  struct radeon_crtc *crtc,
 				  struct rs690_watermark *wm)
 {
@@ -581,7 +581,7 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 	WREG32(R_000078_MC_INDEX, 0x7F);
 }
 
-void rs690_mc_program(struct radeon_device *rdev)
+static void rs690_mc_program(struct radeon_device *rdev)
 {
 	struct rv515_mc_save save;
 
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ae4f93e2f135..785d09590b24 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -35,9 +35,9 @@
 #include "rv515_reg_safe.h"
 
 /* This file gathers functions specific to: rv515 */
-int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
-int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
-void rv515_gpu_init(struct radeon_device *rdev);
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+static void rv515_gpu_init(struct radeon_device *rdev);
 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
 void rv515_debugfs(struct radeon_device *rdev)
@@ -143,7 +143,7 @@ void rv515_vga_render_disable(struct radeon_device *rdev)
 		RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
 }
 
-void rv515_gpu_init(struct radeon_device *rdev)
+static void rv515_gpu_init(struct radeon_device *rdev)
 {
 	unsigned pipe_select_current, gb_pipe_select, tmp;
 
@@ -189,7 +189,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
 	}
 }
 
-void rv515_mc_init(struct radeon_device *rdev)
+static void rv515_mc_init(struct radeon_device *rdev)
 {
 
 	rv515_vram_get_type(rdev);
@@ -261,7 +261,7 @@ static struct drm_info_list rv515_ga_info_list[] = {
 };
 #endif
 
-int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
@@ -270,7 +270,7 @@ int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
 #endif
 }
 
-int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
@@ -310,7 +310,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
-void rv515_mc_program(struct radeon_device *rdev)
+static void rv515_mc_program(struct radeon_device *rdev)
 {
 	struct rv515_mc_save save;
 
@@ -787,7 +787,7 @@ struct rv515_watermark {
 	fixed20_12 sclk;
 };
 
-void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
+static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 				  struct radeon_crtc *crtc,
 				  struct rv515_watermark *wm)
 {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 35a4152bb1ad..79814a08c8e5 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -124,7 +124,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
 /*
  * GART
  */
-int rv770_pcie_gart_enable(struct radeon_device *rdev)
+static int rv770_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r, i;
@@ -175,7 +175,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void rv770_pcie_gart_disable(struct radeon_device *rdev)
+static void rv770_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int i;
@@ -201,7 +201,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void rv770_pcie_gart_fini(struct radeon_device *rdev)
+static void rv770_pcie_gart_fini(struct radeon_device *rdev)
 {
 	radeon_gart_fini(rdev);
 	rv770_pcie_gart_disable(rdev);
@@ -209,7 +209,7 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev)
 }
 
 
-void rv770_agp_enable(struct radeon_device *rdev)
+static void rv770_agp_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int i;
@@ -839,7 +839,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 	}
 }
 
-int rv770_mc_init(struct radeon_device *rdev)
+static int rv770_mc_init(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int chansize, numchan;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d883cae56378..f79633a036c3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1806,13 +1806,14 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 #endif
 			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
-	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+	radeon_ring_write(ring, ib->length_dw |
+			  (ib->vm ? (ib->vm->id << 24) : 0));
 
 	if (!ib->is_const_ib) {
 		/* flush read cache over gart for this vmid */
 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(ring, ib->vm_id);
+		radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
 		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
 				  PACKET3_TC_ACTION_ENA |
@@ -2363,7 +2364,7 @@ void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
 	WREG32(VM_INVALIDATE_REQUEST, 1);
 }
 
-int si_pcie_gart_enable(struct radeon_device *rdev)
+static int si_pcie_gart_enable(struct radeon_device *rdev)
 {
 	int r, i;
 
@@ -2425,7 +2426,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 	       (u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 0);
-	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
 	si_pcie_gart_tlb_flush(rdev);
@@ -2436,7 +2437,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void si_pcie_gart_disable(struct radeon_device *rdev)
+static void si_pcie_gart_disable(struct radeon_device *rdev)
 {
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
@@ -2455,7 +2456,7 @@ void si_pcie_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void si_pcie_gart_fini(struct radeon_device *rdev)
+static void si_pcie_gart_fini(struct radeon_device *rdev)
 {
 	si_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
@@ -2788,41 +2789,84 @@ void si_vm_fini(struct radeon_device *rdev)
 {
 }
 
-int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+/**
+ * si_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman-si).
+ */
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+		    uint64_t addr, unsigned count,
+		    uint32_t incr, uint32_t flags)
 {
-	if (id < 8)
-		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
-	else
-		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2),
-		       vm->pt_gpu_addr >> 12);
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-15 are the VM contexts0-15 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
-	return 0;
+	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	int i;
+	uint64_t value;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(1)));
+	radeon_ring_write(ring, pe);
+	radeon_ring_write(ring, upper_32_bits(pe));
+	for (i = 0; i < count; ++i) {
+		if (flags & RADEON_VM_PAGE_SYSTEM) {
+			value = radeon_vm_map_gart(rdev, addr);
+			value &= 0xFFFFFFFFFFFFF000ULL;
+		} else if (flags & RADEON_VM_PAGE_VALID)
+			value = addr;
+		else
+			value = 0;
+		addr += incr;
+		value |= r600_flags;
+		radeon_ring_write(ring, value);
+		radeon_ring_write(ring, upper_32_bits(value));
+	}
 }
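
The kerneldoc for si_vm_set_page() above describes pe, addr, count and incr in words; a standalone model of just the addressing arithmetic may make it more concrete. The values are made up, and the 8 bytes per entry follow from the two dwords emitted per entry in the loop above:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: entry i lives at pe + 8 * i and points at addr + i * incr
 * (OR'ed with the translated access flags in the real code). */
int main(void)
{
	uint64_t pe = 0x1000;		/* GPU address of the first page entry */
	uint64_t addr = 0x80000000;	/* first destination address */
	uint32_t incr = 4096;		/* bytes between destination addresses */
	unsigned count = 4, i;

	for (i = 0; i < count; ++i)
		printf("PTE at 0x%llx -> 0x%llx\n",
		       (unsigned long long)(pe + 8ull * i),
		       (unsigned long long)(addr + (uint64_t)i * incr));
	return 0;
}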
 
-void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
-	if (vm->id < 8)
-		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
-	else
-		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0);
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-15 are the VM contexts0-15 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
+	struct radeon_ring *ring = &rdev->ring[ridx];
 
-void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	if (vm->id == -1)
+	if (vm == NULL)
 		return;
 
+	/* write new base address */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+
+	if (vm->id < 8) {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+	} else {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+	}
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
 	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0x1);
+
 	/* bits 0-15 are the VM contexts0-15 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 1 << vm->id);
 }
 
 /*
@@ -3199,10 +3243,6 @@ int si_irq_set(struct radeon_device *rdev)
 		DRM_DEBUG("si_irq_set: hpd 6\n");
 		hpd6 |= DC_HPDx_INT_EN;
 	}
-	if (rdev->irq.gui_idle) {
-		DRM_DEBUG("gui idle\n");
-		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
-	}
 
 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
 	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
@@ -3658,7 +3698,6 @@ restart_ih:
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
-			wake_up(&rdev->irq.idle_queue);
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index ef4815c27b1c..7d2a20e56577 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -812,6 +812,21 @@
 #define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
 #define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
 #define	PACKET3_WRITE_DATA				0x37
+#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+		 * 1 - memory (sync - via GRBM)
+		 * 2 - tc/l2
+		 * 3 - gds
+		 * 4 - reserved
+		 * 5 - memory (async - direct)
+		 */
+#define		WR_ONE_ADDR                             (1 << 16)
+#define		WR_CONFIRM                              (1 << 20)
+#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+		 * 1 - pfp
+		 * 2 - ce
+		 */
 #define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
 #define	PACKET3_MEM_SEMAPHORE				0x39
 #define	PACKET3_MPEG_INDEX				0x3A
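
The WRITE_DATA field encodings documented above are the ones si_vm_flush() uses earlier in this series to program VM registers from the ring. A minimal sketch of such a register write, assuming the driver's existing PACKET3() and radeon_ring_write() helpers (the function name is illustrative only):

/* Sketch only: write one register through a WRITE_DATA packet. */
static void example_write_reg(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, WRITE_DATA_ENGINE_SEL(0) |	/* 0 - me */
				WRITE_DATA_DST_SEL(0));		/* 0 - register */
	radeon_ring_write(ring, reg >> 2);	/* dword offset of the register */
	radeon_ring_write(ring, 0);		/* upper bits, unused for registers */
	radeon_ring_write(ring, val);		/* payload */
}

With WRITE_DATA_DST_SEL(1) the same packet targets memory through the GRBM, which is how si_vm_set_page() streams page-table entries.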
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index b88a42154e16..b55c1d661147 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -547,6 +547,8 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->chipset = (enum savage_family)chipset;
 
+	pci_set_master(dev->pdev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
new file mode 100644
index 000000000000..7e7d52b2a2fc
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -0,0 +1,10 @@
+config DRM_SHMOBILE
+	tristate "DRM Support for SH Mobile"
+	depends on DRM && (SUPERH || ARCH_SHMOBILE)
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	help
+	  Choose this option if you have an SH Mobile chipset.
+	  If M is selected the module will be called shmob-drm.
+
diff --git a/drivers/gpu/drm/shmobile/Makefile b/drivers/gpu/drm/shmobile/Makefile
new file mode 100644
index 000000000000..4c3eeb355630
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/Makefile
@@ -0,0 +1,7 @@
+shmob-drm-y := shmob_drm_backlight.o \
+	       shmob_drm_crtc.o \
+	       shmob_drm_drv.o \
+	       shmob_drm_kms.o \
+	       shmob_drm_plane.o
+
+obj-$(CONFIG_DRM_SHMOBILE)	+= shmob-drm.o
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
new file mode 100644
index 000000000000..463aee18f774
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
@@ -0,0 +1,90 @@
+/*
+ * shmob_drm_backlight.c  --  SH Mobile DRM Backlight
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+
+#include "shmob_drm_backlight.h"
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+
+static int shmob_drm_backlight_update(struct backlight_device *bdev)
+{
+	struct shmob_drm_connector *scon = bl_get_data(bdev);
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+	int brightness = bdev->props.brightness;
+
+	if (bdev->props.power != FB_BLANK_UNBLANK ||
+	    bdev->props.state & BL_CORE_SUSPENDED)
+		brightness = 0;
+
+	return bdata->set_brightness(brightness);
+}
+
+static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
+{
+	struct shmob_drm_connector *scon = bl_get_data(bdev);
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+
+	return bdata->get_brightness();
+}
+
+static const struct backlight_ops shmob_drm_backlight_ops = {
+	.options	= BL_CORE_SUSPENDRESUME,
+	.update_status	= shmob_drm_backlight_update,
+	.get_brightness	= shmob_drm_backlight_get_brightness,
+};
+
+void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
+{
+	if (scon->backlight == NULL)
+		return;
+
+	scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
+				     ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+	backlight_update_status(scon->backlight);
+}
+
+int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
+{
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+	struct drm_connector *connector = &scon->connector;
+	struct drm_device *dev = connector->dev;
+	struct backlight_device *backlight;
+
+	if (!bdata->max_brightness)
+		return 0;
+
+	backlight = backlight_device_register(bdata->name, dev->dev, scon,
+					      &shmob_drm_backlight_ops, NULL);
+	if (IS_ERR(backlight)) {
+		dev_err(dev->dev, "unable to register backlight device: %ld\n",
+			PTR_ERR(backlight));
+		return PTR_ERR(backlight);
+	}
+
+	backlight->props.max_brightness = bdata->max_brightness;
+	backlight->props.brightness = bdata->max_brightness;
+	backlight->props.power = FB_BLANK_POWERDOWN;
+	backlight_update_status(backlight);
+
+	scon->backlight = backlight;
+	return 0;
+}
+
+void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
+{
+	backlight_device_unregister(scon->backlight);
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
new file mode 100644
index 000000000000..9477595d2ff3
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
@@ -0,0 +1,23 @@
+/*
+ * shmob_drm_backlight.h  --  SH Mobile DRM Backlight
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_BACKLIGHT_H__
+#define __SHMOB_DRM_BACKLIGHT_H__
+
+struct shmob_drm_connector;
+
+void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode);
+int shmob_drm_backlight_init(struct shmob_drm_connector *scon);
+void shmob_drm_backlight_exit(struct shmob_drm_connector *scon);
+
+#endif /* __SHMOB_DRM_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
new file mode 100644
index 000000000000..0e7a9306bd0c
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -0,0 +1,763 @@
+/*
+ * shmob_drm_crtc.c  --  SH Mobile DRM CRTCs
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_backlight.h"
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+/*
+ * TODO: panel support
+ */
+
+/* -----------------------------------------------------------------------------
+ * Clock management
+ */
+
+static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
+{
+	if (sdev->clock)
+		clk_enable(sdev->clock);
+#if 0
+	if (sdev->meram_dev && sdev->meram_dev->pdev)
+		pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
+#endif
+}
+
+static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
+{
+#if 0
+	if (sdev->meram_dev && sdev->meram_dev->pdev)
+		pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
+#endif
+	if (sdev->clock)
+		clk_disable(sdev->clock);
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC
+ */
+
+static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+	const struct drm_display_mode *mode = &crtc->mode;
+	u32 value;
+
+	value = sdev->ldmt1r
+	      | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL)
+	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0);
+	lcdc_write(sdev, LDMT1R, value);
+
+	if (idata->interface >= SHMOB_DRM_IFACE_SYS8A &&
+	    idata->interface <= SHMOB_DRM_IFACE_SYS24) {
+		/* Setup SYS bus. */
+		value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT)
+		      | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0)
+		      | (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0)
+		      | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
+		      | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
+		      | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
+		lcdc_write(sdev, LDMT2R, value);
+
+		value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
+		      | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
+		      | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
+		      | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
+		lcdc_write(sdev, LDMT3R, value);
+	}
+
+	value = ((mode->hdisplay / 8) << 16)			/* HDCN */
+	      | (mode->htotal / 8);				/* HTCN */
+	lcdc_write(sdev, LDHCNR, value);
+
+	value = (((mode->hsync_end - mode->hsync_start) / 8) << 16) /* HSYNW */
+	      | (mode->hsync_start / 8);			/* HSYNP */
+	lcdc_write(sdev, LDHSYNR, value);
+
+	value = ((mode->hdisplay & 7) << 24) | ((mode->htotal & 7) << 16)
+	      | (((mode->hsync_end - mode->hsync_start) & 7) << 8)
+	      | (mode->hsync_start & 7);
+	lcdc_write(sdev, LDHAJR, value);
+
+	value = ((mode->vdisplay) << 16)			/* VDLN */
+	      | mode->vtotal;					/* VTLN */
+	lcdc_write(sdev, LDVLNR, value);
+
+	value = ((mode->vsync_end - mode->vsync_start) << 16)	/* VSYNW */
+	      | mode->vsync_start;				/* VSYNP */
+	lcdc_write(sdev, LDVSYNR, value);
+}
+
+static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
+{
+	struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private;
+	u32 value;
+
+	value = lcdc_read(sdev, LDCNT2R);
+	if (start)
+		lcdc_write(sdev, LDCNT2R, value | LDCNT2R_DO);
+	else
+		lcdc_write(sdev, LDCNT2R, value & ~LDCNT2R_DO);
+
+	/* Wait until power is applied/stopped. */
+	while (1) {
+		value = lcdc_read(sdev, LDPMR) & LDPMR_LPS;
+		if ((start && value) || (!start && !value))
+			break;
+
+		cpu_relax();
+	}
+
+	if (!start) {
+		/* Stop the dot clock. */
+		lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP);
+	}
+}
+
+/*
+ * shmob_drm_crtc_start - Configure and start the LCDC
+ * @scrtc: the SH Mobile CRTC
+ *
+ * Configure and start the LCDC device. External devices (clocks, MERAM, panels,
+ * ...) are not touched by this function.
+ */
+static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+	const struct shmob_drm_format_info *format;
+	struct drm_device *dev = sdev->ddev;
+	struct drm_plane *plane;
+	u32 value;
+
+	if (scrtc->started)
+		return;
+
+	format = shmob_drm_format_info(crtc->fb->pixel_format);
+	if (WARN_ON(format == NULL))
+		return;
+
+	/* Enable clocks before accessing the hardware. */
+	shmob_drm_clk_on(sdev);
+
+	/* Reset and enable the LCDC. */
+	lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
+	lcdc_wait_bit(sdev, LDCNT2R, LDCNT2R_BR, 0);
+	lcdc_write(sdev, LDCNT2R, LDCNT2R_ME);
+
+	/* Stop the LCDC first and disable all interrupts. */
+	shmob_drm_crtc_start_stop(scrtc, false);
+	lcdc_write(sdev, LDINTR, 0);
+
+	/* Configure power supply, dot clocks and start them. */
+	lcdc_write(sdev, LDPMR, 0);
+
+	value = sdev->lddckr;
+	if (idata->clk_div) {
+		/* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
+		 * denominator.
+		 */
+		lcdc_write(sdev, LDDCKPAT1R, 0);
+		lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1);
+
+		if (idata->clk_div == 1)
+			value |= LDDCKR_MOSEL;
+		else
+			value |= idata->clk_div;
+	}
+
+	lcdc_write(sdev, LDDCKR, value);
+	lcdc_write(sdev, LDDCKSTPR, 0);
+	lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
+
+	/* TODO: Setup SYS panel */
+
+	/* Setup geometry, format, frame buffer memory and operation mode. */
+	shmob_drm_crtc_setup_geometry(scrtc);
+
+	/* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
+	lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
+	lcdc_write(sdev, LDMLSR, scrtc->line_size);
+	lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
+	if (format->yuv)
+		lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
+	lcdc_write(sdev, LDSM1R, 0);
+
+	/* Word and long word swap. */
+	switch (format->fourcc) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV42:
+		value = LDDDSR_LS | LDDDSR_WS;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
+		break;
+	case DRM_FORMAT_ARGB8888:
+	default:
+		value = LDDDSR_LS;
+		break;
+	}
+	lcdc_write(sdev, LDDDSR, value);
+
+	/* Setup planes. */
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		if (plane->crtc == crtc)
+			shmob_drm_plane_setup(plane);
+	}
+
+	/* Enable the display output. */
+	lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
+
+	shmob_drm_crtc_start_stop(scrtc, true);
+
+	scrtc->started = true;
+}
+
+static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+
+	if (!scrtc->started)
+		return;
+
+	/* Disable the MERAM cache. */
+	if (scrtc->cache) {
+		sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
+		scrtc->cache = NULL;
+	}
+
+	/* Stop the LCDC. */
+	shmob_drm_crtc_start_stop(scrtc, false);
+
+	/* Disable the display output. */
+	lcdc_write(sdev, LDCNT1R, 0);
+
+	/* Stop clocks. */
+	shmob_drm_clk_off(sdev);
+
+	scrtc->started = false;
+}
+
+void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc)
+{
+	shmob_drm_crtc_stop(scrtc);
+}
+
+void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc)
+{
+	if (scrtc->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	shmob_drm_crtc_start(scrtc);
+}
+
+static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
+					int x, int y)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	struct drm_gem_cma_object *gem;
+	unsigned int bpp;
+
+	bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+	scrtc->dma[0] = gem->paddr + fb->offsets[0]
+		      + y * fb->pitches[0] + x * bpp / 8;
+
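+	/* For the NV* formats the second plane holds the chroma data. As
+	 * format->bpp covers both planes, bpp - 8 is the number of chroma
+	 * bits per luma pixel: 4 for 4:2:0, 8 for 4:2:2 and 16 for 4:4:4.
+	 * Only 4:2:0 is vertically subsampled, and only 4:4:4 uses two bytes
+	 * per pixel in the chroma plane.
+	 */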
+	if (scrtc->format->yuv) {
+		bpp = scrtc->format->bpp - 8;
+		gem = drm_fb_cma_get_gem_obj(fb, 1);
+		scrtc->dma[1] = gem->paddr + fb->offsets[1]
+			      + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+			      + x * (bpp == 16 ? 2 : 1);
+	}
+
+	if (scrtc->cache)
+		sh_mobile_meram_cache_update(sdev->meram, scrtc->cache,
+					     scrtc->dma[0], scrtc->dma[1],
+					     &scrtc->dma[0], &scrtc->dma[1]);
+}
+
+static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+
+	shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
+
+	lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
+	if (scrtc->format->yuv)
+		lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
+
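+	/* Toggle LDRCNTR_MRS so the LCDC picks up the addresses just written
+	 * to the mirror registers as a consistent pair.
+	 */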
+	lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
+}
+
+#define to_shmob_crtc(c)	container_of(c, struct shmob_drm_crtc, crtc)
+
+static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+
+	if (scrtc->dpms == mode)
+		return;
+
+	if (mode == DRM_MODE_DPMS_ON)
+		shmob_drm_crtc_start(scrtc);
+	else
+		shmob_drm_crtc_stop(scrtc);
+
+	scrtc->dpms = mode;
+}
+
+static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
+{
+	shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode,
+				   int x, int y,
+				   struct drm_framebuffer *old_fb)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram;
+	const struct shmob_drm_format_info *format;
+	void *cache;
+
+	format = shmob_drm_format_info(crtc->fb->pixel_format);
+	if (format == NULL) {
+		dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n",
+			crtc->fb->pixel_format);
+		return -EINVAL;
+	}
+
+	scrtc->format = format;
+	scrtc->line_size = crtc->fb->pitches[0];
+
+	if (sdev->meram) {
+		/* Enable MERAM cache if configured. We need to de-init
+		 * configured ICBs before we can re-initialize them.
+		 */
+		if (scrtc->cache) {
+			sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
+			scrtc->cache = NULL;
+		}
+
+		cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
+						    crtc->fb->pitches[0],
+						    adjusted_mode->vdisplay,
+						    format->meram,
+						    &scrtc->line_size);
+		if (!IS_ERR(cache))
+			scrtc->cache = cache;
+	}
+
+	shmob_drm_crtc_compute_base(scrtc, x, y);
+
+	return 0;
+}
+
+static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
+{
+	shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+					struct drm_framebuffer *old_fb)
+{
+	shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
+
+	return 0;
+}
+
+static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
+	.dpms = shmob_drm_crtc_dpms,
+	.mode_fixup = shmob_drm_crtc_mode_fixup,
+	.prepare = shmob_drm_crtc_mode_prepare,
+	.commit = shmob_drm_crtc_mode_commit,
+	.mode_set = shmob_drm_crtc_mode_set,
+	.mode_set_base = shmob_drm_crtc_mode_set_base,
+};
+
+void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
+				     struct drm_file *file)
+{
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = scrtc->crtc.dev;
+	unsigned long flags;
+
+	/* Destroy the pending vertical blanking event associated with the
+	 * pending page flip, if any, and disable vertical blanking interrupts.
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = scrtc->event;
+	if (event && event->base.file_priv == file) {
+		scrtc->event = NULL;
+		event->base.destroy(&event->base);
+		drm_vblank_put(dev, 0);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = scrtc->crtc.dev;
+	struct timeval vblanktime;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = scrtc->event;
+	scrtc->event = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (event == NULL)
+		return;
+
+	event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime);
+	event->event.tv_sec = vblanktime.tv_sec;
+	event->event.tv_usec = vblanktime.tv_usec;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_add_tail(&event->base.link, &event->base.file_priv->event_list);
+	wake_up_interruptible(&event->base.file_priv->event_wait);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	drm_vblank_put(dev, 0);
+}
+
+static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_pending_vblank_event *event)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+	struct drm_device *dev = scrtc->crtc.dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (scrtc->event != NULL) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EBUSY;
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	crtc->fb = fb;
+	shmob_drm_crtc_update_base(scrtc);
+
+	if (event) {
+		event->pipe = 0;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		scrtc->event = event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		drm_vblank_get(dev, 0);
+	}
+
+	return 0;
+}
+
+static const struct drm_crtc_funcs crtc_funcs = {
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_crtc_helper_set_config,
+	.page_flip = shmob_drm_crtc_page_flip,
+};
+
+int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
+{
+	struct drm_crtc *crtc = &sdev->crtc.crtc;
+	int ret;
+
+	sdev->crtc.dpms = DRM_MODE_DPMS_OFF;
+
+	ret = drm_crtc_init(sdev->ddev, crtc, &crtc_funcs);
+	if (ret < 0)
+		return ret;
+
+	drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+#define to_shmob_encoder(e) \
+	container_of(e, struct shmob_drm_encoder, encoder)
+
+static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
+	struct shmob_drm_device *sdev = encoder->dev->dev_private;
+	struct shmob_drm_connector *scon = &sdev->connector;
+
+	if (senc->dpms == mode)
+		return;
+
+	shmob_drm_backlight_dpms(scon, mode);
+
+	senc->dpms = mode;
+}
+
+static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+					 const struct drm_display_mode *mode,
+					 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct shmob_drm_device *sdev = dev->dev_private;
+	struct drm_connector *connector = &sdev->connector.connector;
+	const struct drm_display_mode *panel_mode;
+
+	if (list_empty(&connector->modes)) {
+		dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
+		return false;
+	}
+
+	/* The flat panel mode is fixed, just copy it to the adjusted mode. */
+	panel_mode = list_first_entry(&connector->modes,
+				      struct drm_display_mode, head);
+	drm_mode_copy(adjusted_mode, panel_mode);
+
+	return true;
+}
+
+static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
+				       struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.dpms = shmob_drm_encoder_dpms,
+	.mode_fixup = shmob_drm_encoder_mode_fixup,
+	.prepare = shmob_drm_encoder_mode_prepare,
+	.commit = shmob_drm_encoder_mode_commit,
+	.mode_set = shmob_drm_encoder_mode_set,
+};
+
+static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = shmob_drm_encoder_destroy,
+};
+
+int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
+{
+	struct drm_encoder *encoder = &sdev->encoder.encoder;
+	int ret;
+
+	sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
+
+	encoder->possible_crtcs = 1;
+
+	ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
+			       DRM_MODE_ENCODER_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+	return 0;
+}
+
+void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable)
+{
+	unsigned long flags;
+	u32 ldintr;
+
+	/* Be careful not to acknowledge any pending interrupt: keeping the
+	 * status bits set in the value written back prevents the
+	 * read-modify-write below from clearing interrupts that are already
+	 * flagged.
+	 */
+	spin_lock_irqsave(&sdev->irq_lock, flags);
+	ldintr = lcdc_read(sdev, LDINTR) | LDINTR_STATUS_MASK;
+	if (enable)
+		ldintr |= LDINTR_VEE;
+	else
+		ldintr &= ~LDINTR_VEE;
+	lcdc_write(sdev, LDINTR, ldintr);
+	spin_unlock_irqrestore(&sdev->irq_lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Connector
+ */
+
+#define to_shmob_connector(c) \
+	container_of(c, struct shmob_drm_connector, connector)
+
+static int shmob_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct shmob_drm_device *sdev = connector->dev->dev_private;
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_create(connector->dev);
+	if (mode == NULL)
+		return 0;
+
+	mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+	mode->clock = sdev->pdata->panel.mode.clock;
+	mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
+	mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
+	mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
+	mode->htotal = sdev->pdata->panel.mode.htotal;
+	mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
+	mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
+	mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
+	mode->vtotal = sdev->pdata->panel.mode.vtotal;
+	mode->flags = sdev->pdata->panel.mode.flags;
+
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+
+	connector->display_info.width_mm = sdev->pdata->panel.width_mm;
+	connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+
+	return 1;
+}
+
+static int shmob_drm_connector_mode_valid(struct drm_connector *connector,
+					  struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+shmob_drm_connector_best_encoder(struct drm_connector *connector)
+{
+	struct shmob_drm_connector *scon = to_shmob_connector(connector);
+
+	return scon->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.get_modes = shmob_drm_connector_get_modes,
+	.mode_valid = shmob_drm_connector_mode_valid,
+	.best_encoder = shmob_drm_connector_best_encoder,
+};
+
+static void shmob_drm_connector_destroy(struct drm_connector *connector)
+{
+	struct shmob_drm_connector *scon = to_shmob_connector(connector);
+
+	shmob_drm_backlight_exit(scon);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+shmob_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = shmob_drm_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = shmob_drm_connector_destroy,
+};
+
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+			       struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = &sdev->connector.connector;
+	int ret;
+
+	sdev->connector.encoder = encoder;
+
+	connector->display_info.width_mm = sdev->pdata->panel.width_mm;
+	connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+
+	ret = drm_connector_init(sdev->ddev, connector, &connector_funcs,
+				 DRM_MODE_CONNECTOR_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+	ret = drm_sysfs_connector_add(connector);
+	if (ret < 0)
+		goto err_cleanup;
+
+	ret = shmob_drm_backlight_init(&sdev->connector);
+	if (ret < 0)
+		goto err_sysfs;
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret < 0)
+		goto err_backlight;
+
+	connector->encoder = encoder;
+
+	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	drm_connector_property_set_value(connector,
+		sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+	return 0;
+
+err_backlight:
+	shmob_drm_backlight_exit(&sdev->connector);
+err_sysfs:
+	drm_sysfs_connector_remove(connector);
+err_cleanup:
+	drm_connector_cleanup(connector);
+	return ret;
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
new file mode 100644
index 000000000000..e5bd109c4c38
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -0,0 +1,60 @@
+/*
+ * shmob_drm_crtc.h  --  SH Mobile DRM CRTCs
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_CRTC_H__
+#define __SHMOB_DRM_CRTC_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+struct backlight_device;
+struct shmob_drm_device;
+
+struct shmob_drm_crtc {
+	struct drm_crtc crtc;
+
+	struct drm_pending_vblank_event *event;
+	int dpms;
+
+	const struct shmob_drm_format_info *format;
+	void *cache;
+	unsigned long dma[2];
+	unsigned int line_size;
+	bool started;
+};
+
+struct shmob_drm_encoder {
+	struct drm_encoder encoder;
+	int dpms;
+};
+
+struct shmob_drm_connector {
+	struct drm_connector connector;
+	struct drm_encoder *encoder;
+
+	struct backlight_device *backlight;
+};
+
+int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
+void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable);
+void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
+				     struct drm_file *file);
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
+void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
+void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
+
+int shmob_drm_encoder_create(struct shmob_drm_device *sdev);
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+			       struct drm_encoder *encoder);
+
+#endif /* __SHMOB_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
new file mode 100644
index 000000000000..c71d493fd0c5
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -0,0 +1,361 @@
+/*
+ * shmob_drm_drv.c  --  SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Hardware initialization
+ */
+
+static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
+{
+	static const u32 ldmt1r[] = {
+		[SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
+		[SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
+		[SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
+		[SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
+		[SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
+		[SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
+		[SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
+		[SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
+		[SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
+		[SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
+		[SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
+		[SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
+		[SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
+		[SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
+		[SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
+		[SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
+		[SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
+		[SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
+		[SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
+	};
+
+	if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
+		dev_err(sdev->dev, "invalid interface type %u\n",
+			sdev->pdata->iface.interface);
+		return -EINVAL;
+	}
+
+	sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
+	return 0;
+}
+
+static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
+					    enum shmob_drm_clk_source clksrc)
+{
+	struct clk *clk;
+	const char *clkname;
+
+	switch (clksrc) {
+	case SHMOB_DRM_CLK_BUS:
+		clkname = "bus_clk";
+		sdev->lddckr = LDDCKR_ICKSEL_BUS;
+		break;
+	case SHMOB_DRM_CLK_PERIPHERAL:
+		clkname = "peripheral_clk";
+		sdev->lddckr = LDDCKR_ICKSEL_MIPI;
+		break;
+	case SHMOB_DRM_CLK_EXTERNAL:
+		clkname = NULL;
+		sdev->lddckr = LDDCKR_ICKSEL_HDMI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	clk = clk_get(sdev->dev, clkname);
+	if (IS_ERR(clk)) {
+		dev_err(sdev->dev, "cannot get dot clock %s\n", clkname);
+		return PTR_ERR(clk);
+	}
+
+	sdev->clock = clk;
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM operations
+ */
+
+static int shmob_drm_unload(struct drm_device *dev)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+	drm_irq_uninstall(dev);
+
+	if (sdev->clock)
+		clk_put(sdev->clock);
+
+	if (sdev->mmio)
+		iounmap(sdev->mmio);
+
+	dev->dev_private = NULL;
+	kfree(sdev);
+
+	return 0;
+}
+
+static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct shmob_drm_platform_data *pdata = dev->dev->platform_data;
+	struct platform_device *pdev = dev->platformdev;
+	struct shmob_drm_device *sdev;
+	struct resource *res;
+	unsigned int i;
+	int ret;
+
+	if (pdata == NULL) {
+		dev_err(dev->dev, "no platform data\n");
+		return -EINVAL;
+	}
+
+	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+	if (sdev == NULL) {
+		dev_err(dev->dev, "failed to allocate private data\n");
+		return -ENOMEM;
+	}
+
+	sdev->dev = &pdev->dev;
+	sdev->pdata = pdata;
+	spin_lock_init(&sdev->irq_lock);
+
+	sdev->ddev = dev;
+	dev->dev_private = sdev;
+
+	/* I/O resources and clocks */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get memory resource\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	sdev->mmio = ioremap_nocache(res->start, resource_size(res));
+	if (sdev->mmio == NULL) {
+		dev_err(&pdev->dev, "failed to remap memory resource\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
+	if (ret < 0)
+		goto done;
+
+	ret = shmob_drm_init_interface(sdev);
+	if (ret < 0)
+		goto done;
+
+	ret = shmob_drm_modeset_init(sdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize mode setting\n");
+		goto done;
+	}
+
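+	/* Create the overlay planes; the LCDC has four blend units (see the
+	 * LDBnB* register definitions, n = 0..3).
+	 */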
+	for (i = 0; i < 4; ++i) {
+		ret = shmob_drm_plane_create(sdev, i);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed to create plane %u\n", i);
+			goto done;
+		}
+	}
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize vblank\n");
+		goto done;
+	}
+
+	ret = drm_irq_install(dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to install IRQ handler\n");
+		goto done;
+	}
+
+done:
+	if (ret)
+		shmob_drm_unload(dev);
+
+	return ret;
+}
+
+static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file);
+}
+
+static irqreturn_t shmob_drm_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct shmob_drm_device *sdev = dev->dev_private;
+	unsigned long flags;
+	u32 status;
+
+	/* Acknowledge interrupts. Putting interrupt enable and interrupt flag
+	 * bits in the same register is really brain-dead design and requires
+	 * taking a spinlock. Writing the flagged status bits back inverted
+	 * acknowledges only those interrupts while leaving the enable bits
+	 * untouched.
+	 */
+	spin_lock_irqsave(&sdev->irq_lock, flags);
+	status = lcdc_read(sdev, LDINTR);
+	lcdc_write(sdev, LDINTR, status ^ LDINTR_STATUS_MASK);
+	spin_unlock_irqrestore(&sdev->irq_lock, flags);
+
+	if (status & LDINTR_VES) {
+		drm_handle_vblank(dev, 0);
+		shmob_drm_crtc_finish_page_flip(&sdev->crtc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_enable_vblank(sdev, true);
+
+	return 0;
+}
+
+static void shmob_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_enable_vblank(sdev, false);
+}
+
+static const struct file_operations shmob_drm_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.fasync		= drm_fasync,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver shmob_drm_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.load			= shmob_drm_load,
+	.unload			= shmob_drm_unload,
+	.preclose		= shmob_drm_preclose,
+	.irq_handler		= shmob_drm_irq,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= shmob_drm_enable_vblank,
+	.disable_vblank		= shmob_drm_disable_vblank,
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.dumb_destroy		= drm_gem_cma_dumb_destroy,
+	.fops			= &shmob_drm_fops,
+	.name			= "shmob-drm",
+	.desc			= "Renesas SH Mobile DRM",
+	.date			= "20120424",
+	.major			= 1,
+	.minor			= 0,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int shmob_drm_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct shmob_drm_device *sdev = ddev->dev_private;
+
+	drm_kms_helper_poll_disable(ddev);
+	shmob_drm_crtc_suspend(&sdev->crtc);
+
+	return 0;
+}
+
+static int shmob_drm_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct shmob_drm_device *sdev = ddev->dev_private;
+
+	mutex_lock(&sdev->ddev->mode_config.mutex);
+	shmob_drm_crtc_resume(&sdev->crtc);
+	mutex_unlock(&sdev->ddev->mode_config.mutex);
+
+	drm_kms_helper_poll_enable(sdev->ddev);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops shmob_drm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform driver
+ */
+
+static int __devinit shmob_drm_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&shmob_drm_driver, pdev);
+}
+
+static int __devexit shmob_drm_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&shmob_drm_driver, pdev);
+
+	return 0;
+}
+
+static struct platform_driver shmob_drm_platform_driver = {
+	.probe		= shmob_drm_probe,
+	.remove		= __devexit_p(shmob_drm_remove),
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "shmob-drm",
+		.pm	= &shmob_drm_pm_ops,
+	},
+};
+
+module_platform_driver(shmob_drm_platform_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
new file mode 100644
index 000000000000..4d46b811b5a7
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
@@ -0,0 +1,47 @@
+/*
+ * shmob_drm_drv.h  --  SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_DRV_H__
+#define __SHMOB_DRM_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/platform_data/shmob_drm.h>
+#include <linux/spinlock.h>
+
+#include "shmob_drm_crtc.h"
+
+struct clk;
+struct device;
+struct drm_device;
+struct sh_mobile_meram_info;
+
+struct shmob_drm_device {
+	struct device *dev;
+	const struct shmob_drm_platform_data *pdata;
+
+	void __iomem *mmio;
+	struct clk *clock;
+	struct sh_mobile_meram_info *meram;
+	u32 lddckr;
+	u32 ldmt1r;
+
+	spinlock_t irq_lock;		/* Protects hardware LDINTR register */
+
+	struct drm_device *ddev;
+
+	struct shmob_drm_crtc crtc;
+	struct shmob_drm_encoder encoder;
+	struct shmob_drm_connector connector;
+};
+
+#endif /* __SHMOB_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
new file mode 100644
index 000000000000..c291ee385b4f
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -0,0 +1,160 @@
+/*
+ * shmob_drm_kms.c  --  SH Mobile DRM Mode Setting
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
+static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
+	{
+		.fourcc = DRM_FORMAT_RGB565,
+		.bpp = 16,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_RGB16,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_RGB888,
+		.bpp = 24,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_RGB24,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_ARGB8888,
+		.bpp = 32,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_ARGB32,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_NV12,
+		.bpp = 12,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_420,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV21,
+		.bpp = 12,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_420,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV16,
+		.bpp = 16,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_422,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV61,
+		.bpp = 16,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_422,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV24,
+		.bpp = 24,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_444,
+		.meram = SH_MOBILE_MERAM_PF_NV24,
+	}, {
+		.fourcc = DRM_FORMAT_NV42,
+		.bpp = 24,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_444,
+		.meram = SH_MOBILE_MERAM_PF_NV24,
+	},
+};
+
+const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(shmob_drm_format_infos); ++i) {
+		if (shmob_drm_format_infos[i].fourcc == fourcc)
+			return &shmob_drm_format_infos[i];
+	}
+
+	return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer
+ */
+
+static struct drm_framebuffer *
+shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+		    struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	const struct shmob_drm_format_info *format;
+
+	format = shmob_drm_format_info(mode_cmd->pixel_format);
+	if (format == NULL) {
+		dev_dbg(dev->dev, "unsupported pixel format %08x\n",
+			mode_cmd->pixel_format);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) {
+		dev_dbg(dev->dev, "valid pitch value %u\n",
+			mode_cmd->pitches[0]);
+		return ERR_PTR(-EINVAL);
+	}
+
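+	/* The chroma plane pitch must match the luma pitch, except for the
+	 * 4:4:4 formats (NV24/NV42) where the chroma plane carries two bytes
+	 * per pixel and therefore needs twice the pitch.
+	 */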
+	if (format->yuv) {
+		unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
+
+		if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
+			dev_dbg(dev->dev,
+				"luma and chroma pitches do not match\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
+static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
+	.fb_create = shmob_drm_fb_create,
+};
+
+int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
+{
+	drm_mode_config_init(sdev->ddev);
+
+	shmob_drm_crtc_create(sdev);
+	shmob_drm_encoder_create(sdev);
+	shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
+
+	drm_kms_helper_poll_init(sdev->ddev);
+
+	sdev->ddev->mode_config.min_width = 0;
+	sdev->ddev->mode_config.min_height = 0;
+	sdev->ddev->mode_config.max_width = 4095;
+	sdev->ddev->mode_config.max_height = 4095;
+	sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
+
+	drm_helper_disable_unused_functions(sdev->ddev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
new file mode 100644
index 000000000000..9495c9111308
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -0,0 +1,34 @@
+/*
+ * shmob_drm_kms.h  --  SH Mobile DRM Mode Setting
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_KMS_H__
+#define __SHMOB_DRM_KMS_H__
+
+#include <linux/types.h>
+
+struct drm_gem_cma_object;
+struct shmob_drm_device;
+
+struct shmob_drm_format_info {
+	u32 fourcc;
+	unsigned int bpp;
+	bool yuv;
+	u32 lddfr;
+	unsigned int meram;
+};
+
+const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
+
+int shmob_drm_modeset_init(struct shmob_drm_device *sdev);
+
+#endif /* __SHMOB_DRM_KMS_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
new file mode 100644
index 000000000000..e1eb899b0288
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -0,0 +1,268 @@
+/*
+ * shmob_drm_plane.c  --  SH Mobile DRM Planes
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+struct shmob_drm_plane {
+	struct drm_plane plane;
+	unsigned int index;
+	unsigned int alpha;
+
+	const struct shmob_drm_format_info *format;
+	unsigned long dma[2];
+
+	unsigned int src_x;
+	unsigned int src_y;
+	unsigned int crtc_x;
+	unsigned int crtc_y;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
+};
+
+#define to_shmob_plane(p)	container_of(p, struct shmob_drm_plane, plane)
+
+static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
+					 struct drm_framebuffer *fb,
+					 int x, int y)
+{
+	struct drm_gem_cma_object *gem;
+	unsigned int bpp;
+
+	bpp = splane->format->yuv ? 8 : splane->format->bpp;
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+	splane->dma[0] = gem->paddr + fb->offsets[0]
+		       + y * fb->pitches[0] + x * bpp / 8;
+
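+	/* Chroma plane addressing, using the same scheme as
+	 * shmob_drm_crtc_compute_base().
+	 */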
+	if (splane->format->yuv) {
+		bpp = splane->format->bpp - 8;
+		gem = drm_fb_cma_get_gem_obj(fb, 1);
+		splane->dma[1] = gem->paddr + fb->offsets[1]
+			       + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+			       + x * (bpp == 16 ? 2 : 1);
+	}
+}
+
+static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
+				    struct drm_framebuffer *fb)
+{
+	struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
+	u32 format;
+
+	/* TODO: Support ROP3 mode */
+	format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
+
+	switch (splane->format->fourcc) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV42:
+		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
+		break;
+	case DRM_FORMAT_ARGB8888:
+	default:
+		format |= LDBBSIFR_SWPL;
+		break;
+	}
+
+	switch (splane->format->fourcc) {
+	case DRM_FORMAT_RGB565:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
+		break;
+	case DRM_FORMAT_RGB888:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDBBSIFR_RPKF_ARGB32;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
+		break;
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
+		break;
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
+		break;
+	}
+
+#define plane_reg_dump(sdev, splane, reg) \
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
+		splane->index, #reg, \
+		lcdc_read(sdev, reg(splane->index)), \
+		lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
+
+	plane_reg_dump(sdev, splane, LDBnBSIFR);
+	plane_reg_dump(sdev, splane, LDBnBSSZR);
+	plane_reg_dump(sdev, splane, LDBnBLOCR);
+	plane_reg_dump(sdev, splane, LDBnBSMWR);
+	plane_reg_dump(sdev, splane, LDBnBSAYR);
+	plane_reg_dump(sdev, splane, LDBnBSACR);
+
+	lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+		"LDBCR", lcdc_read(sdev, LDBCR));
+
+	lcdc_write(sdev, LDBnBSIFR(splane->index), format);
+
+	lcdc_write(sdev, LDBnBSSZR(splane->index),
+		   (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
+		   (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
+	lcdc_write(sdev, LDBnBLOCR(splane->index),
+		   (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
+		   (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
+	lcdc_write(sdev, LDBnBSMWR(splane->index),
+		   fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
+
+	shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
+
+	lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
+	if (splane->format->yuv)
+		lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
+
+	lcdc_write(sdev, LDBCR,
+		   LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+		"LDBCR", lcdc_read(sdev, LDBCR));
+
+	plane_reg_dump(sdev, splane, LDBnBSIFR);
+	plane_reg_dump(sdev, splane, LDBnBSSZR);
+	plane_reg_dump(sdev, splane, LDBnBLOCR);
+	plane_reg_dump(sdev, splane, LDBnBSMWR);
+	plane_reg_dump(sdev, splane, LDBnBSAYR);
+	plane_reg_dump(sdev, splane, LDBnBSACR);
+}
+
+void shmob_drm_plane_setup(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+	if (plane->fb == NULL || !plane->enabled)
+		return;
+
+	__shmob_drm_plane_setup(splane, plane->fb);
+}
+
+static int
+shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		       unsigned int crtc_w, unsigned int crtc_h,
+		       uint32_t src_x, uint32_t src_y,
+		       uint32_t src_w, uint32_t src_h)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+	struct shmob_drm_device *sdev = plane->dev->dev_private;
+	const struct shmob_drm_format_info *format;
+
+	format = shmob_drm_format_info(fb->pixel_format);
+	if (format == NULL) {
+		dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
+			fb->pixel_format);
+		return -EINVAL;
+	}
+
+	if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
+		dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	splane->format = format;
+
+	splane->src_x = src_x >> 16;
+	splane->src_y = src_y >> 16;
+	splane->crtc_x = crtc_x;
+	splane->crtc_y = crtc_y;
+	splane->crtc_w = crtc_w;
+	splane->crtc_h = crtc_h;
+
+	__shmob_drm_plane_setup(splane, fb);
+	return 0;
+}
+
+static int shmob_drm_plane_disable(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+	struct shmob_drm_device *sdev = plane->dev->dev_private;
+
+	splane->format = NULL;
+
+	lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
+	return 0;
+}
+
+static void shmob_drm_plane_destroy(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+	shmob_drm_plane_disable(plane);
+	drm_plane_cleanup(plane);
+	kfree(splane);
+}
+
+static const struct drm_plane_funcs shmob_drm_plane_funcs = {
+	.update_plane = shmob_drm_plane_update,
+	.disable_plane = shmob_drm_plane_disable,
+	.destroy = shmob_drm_plane_destroy,
+};
+
+static const uint32_t formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+	DRM_FORMAT_NV16,
+	DRM_FORMAT_NV61,
+	DRM_FORMAT_NV24,
+	DRM_FORMAT_NV42,
+};
+
+int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
+{
+	struct shmob_drm_plane *splane;
+	int ret;
+
+	splane = kzalloc(sizeof(*splane), GFP_KERNEL);
+	if (splane == NULL)
+		return -ENOMEM;
+
+	splane->index = index;
+	splane->alpha = 255;
+
+	ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
+			     &shmob_drm_plane_funcs, formats,
+			     ARRAY_SIZE(formats), false);
+	if (ret < 0)
+		kfree(splane);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
new file mode 100644
index 000000000000..99623d05e3b0
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -0,0 +1,22 @@
+/*
+ * shmob_drm_plane.h  --  SH Mobile DRM Planes
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_PLANE_H__
+#define __SHMOB_DRM_PLANE_H__
+
+struct shmob_drm_device;
+
+int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
+void shmob_drm_plane_setup(struct drm_plane *plane);
+
+#endif /* __SHMOB_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
new file mode 100644
index 000000000000..7923cdd6368e
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -0,0 +1,311 @@
+/*
+ * shmob_drm_regs.h  --  SH Mobile DRM registers
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_REGS_H__
+#define __SHMOB_DRM_REGS_H__
+
+#include <linux/io.h>
+
+/* Register definitions */
+#define LDDCKPAT1R		0x400
+#define LDDCKPAT2R		0x404
+#define LDDCKR			0x410
+#define LDDCKR_ICKSEL_BUS	(0 << 16)
+#define LDDCKR_ICKSEL_MIPI	(1 << 16)
+#define LDDCKR_ICKSEL_HDMI	(2 << 16)
+#define LDDCKR_ICKSEL_EXT	(3 << 16)
+#define LDDCKR_ICKSEL_MASK	(7 << 16)
+#define LDDCKR_MOSEL		(1 << 6)
+#define LDDCKSTPR		0x414
+#define LDDCKSTPR_DCKSTS	(1 << 16)
+#define LDDCKSTPR_DCKSTP	(1 << 0)
+#define LDMT1R			0x418
+#define LDMT1R_VPOL		(1 << 28)
+#define LDMT1R_HPOL		(1 << 27)
+#define LDMT1R_DWPOL		(1 << 26)
+#define LDMT1R_DIPOL		(1 << 25)
+#define LDMT1R_DAPOL		(1 << 24)
+#define LDMT1R_HSCNT		(1 << 17)
+#define LDMT1R_DWCNT		(1 << 16)
+#define LDMT1R_IFM		(1 << 12)
+#define LDMT1R_MIFTYP_RGB8	(0x0 << 0)
+#define LDMT1R_MIFTYP_RGB9	(0x4 << 0)
+#define LDMT1R_MIFTYP_RGB12A	(0x5 << 0)
+#define LDMT1R_MIFTYP_RGB12B	(0x6 << 0)
+#define LDMT1R_MIFTYP_RGB16	(0x7 << 0)
+#define LDMT1R_MIFTYP_RGB18	(0xa << 0)
+#define LDMT1R_MIFTYP_RGB24	(0xb << 0)
+#define LDMT1R_MIFTYP_YCBCR	(0xf << 0)
+#define LDMT1R_MIFTYP_SYS8A	(0x0 << 0)
+#define LDMT1R_MIFTYP_SYS8B	(0x1 << 0)
+#define LDMT1R_MIFTYP_SYS8C	(0x2 << 0)
+#define LDMT1R_MIFTYP_SYS8D	(0x3 << 0)
+#define LDMT1R_MIFTYP_SYS9	(0x4 << 0)
+#define LDMT1R_MIFTYP_SYS12	(0x5 << 0)
+#define LDMT1R_MIFTYP_SYS16A	(0x7 << 0)
+#define LDMT1R_MIFTYP_SYS16B	(0x8 << 0)
+#define LDMT1R_MIFTYP_SYS16C	(0x9 << 0)
+#define LDMT1R_MIFTYP_SYS18	(0xa << 0)
+#define LDMT1R_MIFTYP_SYS24	(0xb << 0)
+#define LDMT1R_MIFTYP_MASK	(0xf << 0)
+#define LDMT2R			0x41c
+#define LDMT2R_CSUP_MASK	(7 << 26)
+#define LDMT2R_CSUP_SHIFT	26
+#define LDMT2R_RSV		(1 << 25)
+#define LDMT2R_VSEL		(1 << 24)
+#define LDMT2R_WCSC_MASK	(0xff << 16)
+#define LDMT2R_WCSC_SHIFT	16
+#define LDMT2R_WCEC_MASK	(0xff << 8)
+#define LDMT2R_WCEC_SHIFT	8
+#define LDMT2R_WCLW_MASK	(0xff << 0)
+#define LDMT2R_WCLW_SHIFT	0
+#define LDMT3R			0x420
+#define LDMT3R_RDLC_MASK	(0x3f << 24)
+#define LDMT3R_RDLC_SHIFT	24
+#define LDMT3R_RCSC_MASK	(0xff << 16)
+#define LDMT3R_RCSC_SHIFT	16
+#define LDMT3R_RCEC_MASK	(0xff << 8)
+#define LDMT3R_RCEC_SHIFT	8
+#define LDMT3R_RCLW_MASK	(0xff << 0)
+#define LDMT3R_RCLW_SHIFT	0
+#define LDDFR			0x424
+#define LDDFR_CF1		(1 << 18)
+#define LDDFR_CF0		(1 << 17)
+#define LDDFR_CC		(1 << 16)
+#define LDDFR_YF_420		(0 << 8)
+#define LDDFR_YF_422		(1 << 8)
+#define LDDFR_YF_444		(2 << 8)
+#define LDDFR_YF_MASK		(3 << 8)
+#define LDDFR_PKF_ARGB32	(0x00 << 0)
+#define LDDFR_PKF_RGB16		(0x03 << 0)
+#define LDDFR_PKF_RGB24		(0x0b << 0)
+#define LDDFR_PKF_MASK		(0x1f << 0)
+#define LDSM1R			0x428
+#define LDSM1R_OS		(1 << 0)
+#define LDSM2R			0x42c
+#define LDSM2R_OSTRG		(1 << 0)
+#define LDSA1R			0x430
+#define LDSA2R			0x434
+#define LDMLSR			0x438
+#define LDWBFR			0x43c
+#define LDWBCNTR		0x440
+#define LDWBAR			0x444
+#define LDHCNR			0x448
+#define LDHSYNR			0x44c
+#define LDVLNR			0x450
+#define LDVSYNR			0x454
+#define LDHPDR			0x458
+#define LDVPDR			0x45c
+#define LDPMR			0x460
+#define LDPMR_LPS		(3 << 0)
+#define LDINTR			0x468
+#define LDINTR_FE		(1 << 10)
+#define LDINTR_VSE		(1 << 9)
+#define LDINTR_VEE		(1 << 8)
+#define LDINTR_FS		(1 << 2)
+#define LDINTR_VSS		(1 << 1)
+#define LDINTR_VES		(1 << 0)
+#define LDINTR_STATUS_MASK	(0xff << 0)
+#define LDSR			0x46c
+#define LDSR_MSS		(1 << 10)
+#define LDSR_MRS		(1 << 8)
+#define LDSR_AS			(1 << 1)
+#define LDCNT1R			0x470
+#define LDCNT1R_DE		(1 << 0)
+#define LDCNT2R			0x474
+#define LDCNT2R_BR		(1 << 8)
+#define LDCNT2R_MD		(1 << 3)
+#define LDCNT2R_SE		(1 << 2)
+#define LDCNT2R_ME		(1 << 1)
+#define LDCNT2R_DO		(1 << 0)
+#define LDRCNTR			0x478
+#define LDRCNTR_SRS		(1 << 17)
+#define LDRCNTR_SRC		(1 << 16)
+#define LDRCNTR_MRS		(1 << 1)
+#define LDRCNTR_MRC		(1 << 0)
+#define LDDDSR			0x47c
+#define LDDDSR_LS		(1 << 2)
+#define LDDDSR_WS		(1 << 1)
+#define LDDDSR_BS		(1 << 0)
+#define LDHAJR			0x4a0
+
+#define LDDWD0R			0x800
+#define LDDWDxR_WDACT		(1 << 28)
+#define LDDWDxR_RSW		(1 << 24)
+#define LDDRDR			0x840
+#define LDDRDR_RSR		(1 << 24)
+#define LDDRDR_DRD_MASK		(0x3ffff << 0)
+#define LDDWAR			0x900
+#define LDDWAR_WA		(1 << 0)
+#define LDDRAR			0x904
+#define LDDRAR_RA		(1 << 0)
+
+#define LDBCR			0xb00
+#define LDBCR_UPC(n)		(1 << ((n) + 16))
+#define LDBCR_UPF(n)		(1 << ((n) + 8))
+#define LDBCR_UPD(n)		(1 << ((n) + 0))
+#define LDBnBSIFR(n)		(0xb20 + (n) * 0x20 + 0x00)
+#define LDBBSIFR_EN		(1 << 31)
+#define LDBBSIFR_VS		(1 << 29)
+#define LDBBSIFR_BRSEL		(1 << 28)
+#define LDBBSIFR_MX		(1 << 27)
+#define LDBBSIFR_MY		(1 << 26)
+#define LDBBSIFR_CV3		(3 << 24)
+#define LDBBSIFR_CV2		(2 << 24)
+#define LDBBSIFR_CV1		(1 << 24)
+#define LDBBSIFR_CV0		(0 << 24)
+#define LDBBSIFR_CV_MASK	(3 << 24)
+#define LDBBSIFR_LAY_MASK	(0xff << 16)
+#define LDBBSIFR_LAY_SHIFT	16
+#define LDBBSIFR_ROP3_MASK	(0xff << 16)
+#define LDBBSIFR_ROP3_SHIFT	16
+#define LDBBSIFR_AL_PL8		(3 << 14)
+#define LDBBSIFR_AL_PL1		(2 << 14)
+#define LDBBSIFR_AL_PK		(1 << 14)
+#define LDBBSIFR_AL_1		(0 << 14)
+#define LDBBSIFR_AL_MASK	(3 << 14)
+#define LDBBSIFR_SWPL		(1 << 10)
+#define LDBBSIFR_SWPW		(1 << 9)
+#define LDBBSIFR_SWPB		(1 << 8)
+#define LDBBSIFR_RY		(1 << 7)
+#define LDBBSIFR_CHRR_420	(2 << 0)
+#define LDBBSIFR_CHRR_422	(1 << 0)
+#define LDBBSIFR_CHRR_444	(0 << 0)
+#define LDBBSIFR_RPKF_ARGB32	(0x00 << 0)
+#define LDBBSIFR_RPKF_RGB16	(0x03 << 0)
+#define LDBBSIFR_RPKF_RGB24	(0x0b << 0)
+#define LDBBSIFR_RPKF_MASK	(0x1f << 0)
+#define LDBnBSSZR(n)		(0xb20 + (n) * 0x20 + 0x04)
+#define LDBBSSZR_BVSS_MASK	(0xfff << 16)
+#define LDBBSSZR_BVSS_SHIFT	16
+#define LDBBSSZR_BHSS_MASK	(0xfff << 0)
+#define LDBBSSZR_BHSS_SHIFT	0
+#define LDBnBLOCR(n)		(0xb20 + (n) * 0x20 + 0x08)
+#define LDBBLOCR_CVLC_MASK	(0xfff << 16)
+#define LDBBLOCR_CVLC_SHIFT	16
+#define LDBBLOCR_CHLC_MASK	(0xfff << 0)
+#define LDBBLOCR_CHLC_SHIFT	0
+#define LDBnBSMWR(n)		(0xb20 + (n) * 0x20 + 0x0c)
+#define LDBBSMWR_BSMWA_MASK	(0xffff << 16)
+#define LDBBSMWR_BSMWA_SHIFT	16
+#define LDBBSMWR_BSMW_MASK	(0xffff << 0)
+#define LDBBSMWR_BSMW_SHIFT	0
+#define LDBnBSAYR(n)		(0xb20 + (n) * 0x20 + 0x10)
+#define LDBBSAYR_FG1A_MASK	(0xff << 24)
+#define LDBBSAYR_FG1A_SHIFT	24
+#define LDBBSAYR_FG1R_MASK	(0xff << 16)
+#define LDBBSAYR_FG1R_SHIFT	16
+#define LDBBSAYR_FG1G_MASK	(0xff << 8)
+#define LDBBSAYR_FG1G_SHIFT	8
+#define LDBBSAYR_FG1B_MASK	(0xff << 0)
+#define LDBBSAYR_FG1B_SHIFT	0
+#define LDBnBSACR(n)		(0xb20 + (n) * 0x20 + 0x14)
+#define LDBBSACR_FG2A_MASK	(0xff << 24)
+#define LDBBSACR_FG2A_SHIFT	24
+#define LDBBSACR_FG2R_MASK	(0xff << 16)
+#define LDBBSACR_FG2R_SHIFT	16
+#define LDBBSACR_FG2G_MASK	(0xff << 8)
+#define LDBBSACR_FG2G_SHIFT	8
+#define LDBBSACR_FG2B_MASK	(0xff << 0)
+#define LDBBSACR_FG2B_SHIFT	0
+#define LDBnBSAAR(n)		(0xb20 + (n) * 0x20 + 0x18)
+#define LDBBSAAR_AP_MASK	(0xff << 24)
+#define LDBBSAAR_AP_SHIFT	24
+#define LDBBSAAR_R_MASK		(0xff << 16)
+#define LDBBSAAR_R_SHIFT	16
+#define LDBBSAAR_GY_MASK	(0xff << 8)
+#define LDBBSAAR_GY_SHIFT	8
+#define LDBBSAAR_B_MASK		(0xff << 0)
+#define LDBBSAAR_B_SHIFT	0
+#define LDBnBPPCR(n)		(0xb20 + (n) * 0x20 + 0x1c)
+#define LDBBPPCR_AP_MASK	(0xff << 24)
+#define LDBBPPCR_AP_SHIFT	24
+#define LDBBPPCR_R_MASK		(0xff << 16)
+#define LDBBPPCR_R_SHIFT	16
+#define LDBBPPCR_GY_MASK	(0xff << 8)
+#define LDBBPPCR_GY_SHIFT	8
+#define LDBBPPCR_B_MASK		(0xff << 0)
+#define LDBBPPCR_B_SHIFT	0
+#define LDBnBBGCL(n)		(0xb10 + (n) * 0x04)
+#define LDBBBGCL_BGA_MASK	(0xff << 24)
+#define LDBBBGCL_BGA_SHIFT	24
+#define LDBBBGCL_BGR_MASK	(0xff << 16)
+#define LDBBBGCL_BGR_SHIFT	16
+#define LDBBBGCL_BGG_MASK	(0xff << 8)
+#define LDBBBGCL_BGG_SHIFT	8
+#define LDBBBGCL_BGB_MASK	(0xff << 0)
+#define LDBBBGCL_BGB_SHIFT	0
+
+#define LCDC_SIDE_B_OFFSET	0x1000
+#define LCDC_MIRROR_OFFSET	0x2000
+
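+/*
+ * Most of the timing, format and plane registers are banked: lcdc_write()
+ * updates both side A and the side B copy at +0x1000 for those registers,
+ * while lcdc_write_mirror() touches only the mirror copy at +0x2000, used
+ * when updating frame buffer addresses on the fly.
+ */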
+static inline bool lcdc_is_banked(u32 reg)
+{
+	switch (reg) {
+	case LDMT1R:
+	case LDMT2R:
+	case LDMT3R:
+	case LDDFR:
+	case LDSM1R:
+	case LDSA1R:
+	case LDSA2R:
+	case LDMLSR:
+	case LDWBFR:
+	case LDWBCNTR:
+	case LDWBAR:
+	case LDHCNR:
+	case LDHSYNR:
+	case LDVLNR:
+	case LDVSYNR:
+	case LDHPDR:
+	case LDVPDR:
+	case LDHAJR:
+		return true;
+	default:
+		return reg >= LDBnBBGCL(0) && reg <= LDBnBPPCR(3);
+	}
+}
+
+static inline void lcdc_write_mirror(struct shmob_drm_device *sdev, u32 reg,
+				     u32 data)
+{
+	iowrite32(data, sdev->mmio + reg + LCDC_MIRROR_OFFSET);
+}
+
+static inline void lcdc_write(struct shmob_drm_device *sdev, u32 reg, u32 data)
+{
+	iowrite32(data, sdev->mmio + reg);
+	if (lcdc_is_banked(reg))
+		iowrite32(data, sdev->mmio + reg + LCDC_SIDE_B_OFFSET);
+}
+
+static inline u32 lcdc_read(struct shmob_drm_device *sdev, u32 reg)
+{
+	return ioread32(sdev->mmio + reg);
+}
+
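+/* Poll @reg until the bits selected by @mask read back as @until; give up
+ * with -ETIMEDOUT after roughly 5 ms.
+ */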
+static inline int lcdc_wait_bit(struct shmob_drm_device *sdev, u32 reg,
+				u32 mask, u32 until)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(5);
+
+	while ((lcdc_read(sdev, reg) & mask) != until) {
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+#endif /* __SHMOB_DRM_REGS_H__ */
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2a4aa57779e7..2026060f03e0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -472,7 +472,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 	else
 		tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__sparc__)
+#if defined(__sparc__) || defined(__mips__)
 	if (!(caching_flags & TTM_PL_FLAG_CACHED))
 		tmp = pgprot_noncached(tmp);
 #endif
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index d4aa5a82ab1b..b8b394319b45 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -1060,7 +1060,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 	if (!_manager)
-		goto err_manager;
+		goto err;
 
 	mutex_init(&_manager->lock);
 	INIT_LIST_HEAD(&_manager->pools);
@@ -1078,9 +1078,6 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	}
 	ttm_dma_pool_mm_shrink_init(_manager);
 	return 0;
-err_manager:
-	kfree(_manager);
-	_manager = NULL;
 err:
 	return ret;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 82a529e45afe..bf8260133ea9 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -290,8 +290,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 	struct file *swap_storage;
 	struct page *from_page;
 	struct page *to_page;
-	void *from_virtual;
-	void *to_virtual;
 	int i;
 	int ret = -ENOMEM;
 
@@ -311,11 +309,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 			goto out_err;
 
 		preempt_disable();
-		from_virtual = kmap_atomic(from_page);
-		to_virtual = kmap_atomic(to_page);
-		memcpy(to_virtual, from_virtual, PAGE_SIZE);
-		kunmap_atomic(to_virtual);
-		kunmap_atomic(from_virtual);
+		copy_highpage(to_page, from_page);
 		preempt_enable();
 		page_cache_release(from_page);
 	}
@@ -336,8 +330,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 	struct file *swap_storage;
 	struct page *from_page;
 	struct page *to_page;
-	void *from_virtual;
-	void *to_virtual;
 	int i;
 	int ret = -ENOMEM;
 
@@ -367,11 +359,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 			goto out_err;
 		}
 		preempt_disable();
-		from_virtual = kmap_atomic(from_page);
-		to_virtual = kmap_atomic(to_page);
-		memcpy(to_virtual, from_virtual, PAGE_SIZE);
-		kunmap_atomic(to_virtual);
-		kunmap_atomic(from_virtual);
+		copy_highpage(to_page, from_page);
 		preempt_enable();
 		set_page_dirty(to_page);
 		mark_page_accessed(to_page);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 627cd85521b1..b3b2cedf6745 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -57,11 +57,8 @@ static int udl_get_modes(struct drm_connector *connector)
 
 	edid = (struct edid *)udl_get_edid(udl);
 
-	connector->display_info.raw_edid = (char *)edid;
-
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
-	connector->display_info.raw_edid = NULL;
 	kfree(edid);
 	return ret;
 }
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index 610538308f19..4052c4656498 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -16,7 +16,7 @@
 #include "udl_drv.h"
 
 /* dummy encoder */
-void udl_enc_destroy(struct drm_encoder *encoder)
+static void udl_enc_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
 	kfree(encoder);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 877df059a76f..67df842fbb33 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/fb.h>
+#include <linux/dma-buf.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -355,12 +356,12 @@ static struct fb_ops udlfb_ops = {
 	.fb_release = udl_fb_release,
 };
 
-void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+static void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 			   u16 blue, int regno)
 {
 }
 
-void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+static void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 			     u16 *blue, int regno)
 {
 	*red = 0;
@@ -376,16 +377,33 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 {
 	struct udl_framebuffer *ufb = to_udl_fb(fb);
 	int i;
+	int ret = 0;
 
 	if (!ufb->active_16)
 		return 0;
 
+	if (ufb->obj->base.import_attach) {
+		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
+					       0, ufb->obj->base.size,
+					       DMA_FROM_DEVICE);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < num_clips; i++) {
-		udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
+		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
 				  clips[i].x2 - clips[i].x1,
 				  clips[i].y2 - clips[i].y1);
+		if (ret)
+			break;
 	}
-	return 0;
+
+	if (ufb->obj->base.import_attach) {
+		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+				       0, ufb->obj->base.size,
+				       DMA_FROM_DEVICE);
+	}
+	return ret;
 }
 
 static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 4acc8c7431cd..afd212c99216 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -181,11 +181,6 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 	int ret;
 
 	if (obj->base.import_attach) {
-		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
-					       0, obj->base.size, DMA_BIDIRECTIONAL);
-		if (ret)
-			return -EINVAL;
-
 		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
 		if (!obj->vmapping)
 			return -ENOMEM;
@@ -206,8 +201,6 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
 {
 	if (obj->base.import_attach) {
 		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
-		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
-				       obj->base.size, DMA_BIDIRECTIONAL);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 1f6dbfd62c2a..0ce2d7195256 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,11 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
 	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
 				    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
 	if (total_len > 5) {
-		DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \
-			"%02x %02x %02x %02x %02x %02x %02x\n",
-			total_len, desc[0],
-			desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
-			desc[7], desc[8], desc[9], desc[10]);
+		DRM_INFO("vendor descriptor length:%x data:%*ph\n",
+			total_len, 11, desc);
 
 		if ((desc[0] != total_len) || /* descriptor length */
 		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 52ac2b2d9b73..e96d2349bd54 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -391,7 +391,7 @@ static const struct drm_crtc_funcs udl_crtc_funcs = {
 	.destroy = udl_crtc_destroy,
 };
 
-int udl_crtc_init(struct drm_device *dev)
+static int udl_crtc_init(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
 
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index e96348143a4e..dc095526ffb7 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -126,10 +126,10 @@ static void udl_compress_hline16(
 
 	while ((pixel_end > pixel) &&
 	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
-		uint8_t *raw_pixels_count_byte = 0;
-		uint8_t *cmd_pixels_count_byte = 0;
-		const u8 *raw_pixel_start = 0;
-		const u8 *cmd_pixel_start, *cmd_pixel_end = 0;
+		uint8_t *raw_pixels_count_byte = NULL;
+		uint8_t *cmd_pixels_count_byte = NULL;
+		const u8 *raw_pixel_start = NULL;
+		const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
 
 		prefetchw((void *) cmd); /* pull in one cache line at least */
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c84d9ba66f3b..ed3c1e7ddde9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -438,7 +438,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		DRM_ERROR("Failed allocating a device private struct.\n");
 		return -ENOMEM;
 	}
-	memset(dev_priv, 0, sizeof(*dev_priv));
 
 	pci_set_master(dev->pdev);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index c50724bd30f6..54743943d8b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -483,7 +483,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 	}
 
 	/* only need to do this once */
-	memset(cmd, 0, fifo_size);
 	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
 	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
index 55e9c8655850..38be186c249a 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -200,14 +200,11 @@ static int omap_connector_get_modes(struct drm_connector *connector)
 			drm_mode_connector_update_edid_property(
 					connector, edid);
 			n = drm_add_edid_modes(connector, edid);
-			kfree(connector->display_info.raw_edid);
-			connector->display_info.raw_edid = edid;
 		} else {
 			drm_mode_connector_update_edid_property(
 					connector, NULL);
-			connector->display_info.raw_edid = NULL;
-			kfree(edid);
 		}
+		kfree(edid);
 	} else {
 		struct drm_display_mode *mode = drm_mode_create(dev);
 		struct omap_video_timings timings = {0};
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 8c6ed3b0c6f6..8a027bb77d97 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -276,7 +276,7 @@ fail:
 		if (fbi)
 			framebuffer_release(fbi);
 		if (fb)
-			fb->funcs->destroy(fb);
+			drm_framebuffer_remove(fb);
 	}
 
 	return ret;
@@ -401,7 +401,7 @@ void omap_fbdev_free(struct drm_device *dev)
 
 	/* this will free the backing object */
 	if (fbdev->fb)
-		fbdev->fb->funcs->destroy(fbdev->fb);
+		drm_framebuffer_remove(fbdev->fb);
 
 	kfree(fbdev);