author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-08 08:23:15 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-08 08:23:15 -0800
commit    851ca779d110f694b5d078bc4af06d3ad37169e8 (patch)
tree      3d03de09e44ef02a6f73924f32fa21646347e64e /drivers/gpu/drm/nouveau/nvkm
parent    b5dd0c658c31b469ccff1b637e5124851e7a4a1c (diff)
parent    4b057e73f28f1df13b77b77a52094238ffdf8abd (diff)
download  linux-851ca779d110f694b5d078bc4af06d3ad37169e8.tar.gz
Merge tag 'drm-next-2019-03-06' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for the 5.1 merge window.

  The big changes I'd highlight are:
   - nouveau has HMM support now; there is finally an in-tree user, so
     we can quieten down the 'rip it out' people.
   - i915 now enables fastboot by default on Skylake+
   - DisplayPort multi-stream (MST) support has been refactored and
     should hopefully be more reliable.

  Core:
   - header cleanups aiming towards removing drmP.h
   - dma-buf fence seqnos widened to 64 bits
   - common helper for DP MST hotplug for radeon, i915, amdgpu + new
     refcounting scheme
   - MST i2c improvements
   - drm_syncobj_cb removal
   - ARM FB compression fourcc
   - P010 + P016 fourcc
   - allwinner tiled format modifier
   - i2c over aux I2C_M_STOP support
   - DRM_AUTH handling fixes

  TTM:
   - ref/unref renaming

  New driver:
   - ARM komeda display driver

  scheduler:
   - refactor mirror list handling
   - rework hw fence processing
   - 0 run queue entity fix

  bridge:
   - TI DS90C185 LVDS bridge
   - thc63lvdm83d bridge improvements
   - cadence + allwinner DSI ported to generic phy

  panels:
   - Sitronix ST7701 panel
   - Kingdisplay KD097D04
   - LeMaker BL035-RGB-002
   - PDA 91-00156-A0
   - Innolux EE101IA-01D

  i915:
   - Enable fastboot by default on SKL+/VLV/CHV
   - Export RPCS configuration for ICL media driver
   - Coffeelake PCI ID
   - CNL clocks setup fixes
   - ACPI/PMIC support for MIPI/DSI
   - Per-engine WA init for all engines
   - Shrinker locking fixes
   - Kerneldoc updates
   - Lots of ring improvements and reset fixes
   - Coffeelake GVT Support
   - VFIO GVT EDID Region support
   - runtime PM wakeref tracking
   - ILK->IVB primary plane enable delays
   - userptr mutex locking fixes
   - DSI fixes
   - LVDS/TV cleanups
   - HW readout fixes
   - LUT robustness fixes
   - ICL display and watermark fixes
   - gem mmap race fix

  amdgpu:
   - add scheduled dependencies interface
   - DCC on scanout surfaces
   - vega10/20 BACO support
   - Multiple IH rings on soc15
   - XGMI locking fixes
   - DC i2c/aux cleanups
   - runtime SMU debug interface
   - Kexec improvements
   - SR-IOV fixes
   - DC freesync + ABM fixes
   - GDS fixes
   - GPUVM fixes
   - vega20 PCIE DPM switching fixes
   - Context priority handling fixes

  radeon:
   - fix missing break in evergreen parser

  nouveau:
   - SVM support via HMM

  msm:
   - QCOM Compressed modifier support

  exynos:
   - s5pv210 rotator support

  imx:
   - zpos property support
   - pending update fixes

  v3d:
   - cache flush improvements

  vc4:
   - reflection support
   - HDMI overscan support

  tegra:
   - CEC refactoring
   - HDMI audio fixes
   - Tegra186 prep work
   - SOR crossbar device tree fixes

  sun4i:
   - implicit fencing support
   - YUV and scaler support improvements
   - A23 support
   - tiling fixes

  atmel-hlcdc:
   - clipping and rotation property fixes

  qxl:
   - BO and PRIME improvements
   - generic fbdev emulation

  dw-hdmi:
   - HDMI 2.0 2160p
   - YUV420 output

  rockchip:
   - implicit fencing support
   - reflection properties

  virtio-gpu:
   - use generic fbdev emulation

  tilcdc:
   - cpufreq vs crtc init fix

  rcar-du:
   - R8A774C0 support
   - D3/E3 RGB output routing fixes and DPAD0 support
   - R8A7744 LVDS support

  bochs:
   - atomic and generic fbdev emulation
   - ID mismatch error on bochs load

  meson:
   - remove firmware fbs"

* tag 'drm-next-2019-03-06' of git://anongit.freedesktop.org/drm/drm: (1130 commits)
  drm/amd/display: Use vrr friendly pageflip throttling in DC.
  drm/imx: only send commit done event when all state has been applied
  drm/imx: allow building under COMPILE_TEST
  drm/imx: imx-tve: depend on COMMON_CLK
  drm/imx: ipuv3-plane: add zpos property
  drm/imx: ipuv3-plane: add function to query atomic update status
  gpu: ipu-v3: prg: add function to get channel configure status
  gpu: ipu-v3: pre: add double buffer status readback
  drm/amdgpu: Bump amdgpu version for context priority override.
  drm/amdgpu/powerplay: fix typo in BACO header guards
  drm/amdgpu/powerplay: fix return codes in BACO code
  drm/amdgpu: add missing license on baco files
  drm/bochs: Fix the ID mismatch error
  drm/nouveau/dmem: use dma addresses during migration copies
  drm/nouveau/dmem: use physical vram addresses during migration copies
  drm/nouveau/dmem: extend copy function to allow direct use of physical addresses
  drm/nouveau/svm: new ioctl to migrate process memory to GPU memory
  drm/nouveau/dmem: device memory helpers for SVM
  drm/nouveau/svm: initial support for shared virtual memory
  drm/nouveau: prepare for enabling svm with existing userspace interfaces
  ...
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c) | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c) | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c) | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c) | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c) | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c) | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c) | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 331
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c) | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c) | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c) | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c) | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c) | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 382
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h | 82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 210
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c) | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c | 60
83 files changed, 1654 insertions, 452 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index c61b467cf45e..245990de1e90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -39,6 +39,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
 	[NVKM_SUBDEV_FB      ] = "fb",
 	[NVKM_SUBDEV_FUSE    ] = "fuse",
 	[NVKM_SUBDEV_GPIO    ] = "gpio",
+	[NVKM_SUBDEV_GSP     ] = "gsp",
 	[NVKM_SUBDEV_I2C     ] = "i2c",
 	[NVKM_SUBDEV_IBUS    ] = "priv",
 	[NVKM_SUBDEV_ICCSENSE] = "iccsense",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 177a23301d6a..9211663239af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -6,4 +6,4 @@ nvkm-y += nvkm/engine/ce/gm200.o
 nvkm-y += nvkm/engine/ce/gp100.o
 nvkm-y += nvkm/engine/ce/gp102.o
 nvkm-y += nvkm/engine/ce/gv100.o
-nvkm-y += nvkm/engine/ce/tu104.o
+nvkm-y += nvkm/engine/ce/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 3c25043bbb33..b4308e2d8c75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -24,7 +24,7 @@
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
-tu104_ce = {
+tu102_ce = {
 	.intr = gp100_ce_intr,
 	.sclass = {
 		{ -1, -1, TURING_DMA_COPY_A },
@@ -33,8 +33,8 @@ tu104_ce = {
 };
 
 int
-tu104_ce_new(struct nvkm_device *device, int index,
+tu102_ce_new(struct nvkm_device *device, int index,
 	     struct nvkm_engine **pengine)
 {
-	return nvkm_engine_new_(&tu104_ce, device, index, true, pengine);
+	return nvkm_engine_new_(&tu102_ce, device, index, true, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index d9edb5785813..7971096b6767 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1613,7 +1613,7 @@ nvd7_chipset = {
 	.pci = gf106_pci_new,
 	.therm = gf119_therm_new,
 	.timer = nv41_timer_new,
-	.volt = gf100_volt_new,
+	.volt = gf117_volt_new,
 	.ce[0] = gf100_ce_new,
 	.disp = gf119_disp_new,
 	.dma = gf119_dma_new,
@@ -2405,6 +2405,7 @@ nv140_chipset = {
 	.fb = gv100_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
+	.gsp = gv100_gsp_new,
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
@@ -2437,97 +2438,106 @@ nv140_chipset = {
 static const struct nvkm_device_chip
 nv162_chipset = {
 	.name = "TU102",
-	.bar = tu104_bar_new,
+	.bar = tu102_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
-	.devinit = tu104_devinit_new,
-	.fault = tu104_fault_new,
+	.devinit = tu102_devinit_new,
+	.fault = tu102_fault_new,
 	.fb = gv100_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
+	.gsp = gv100_gsp_new,
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
 	.ltc = gp102_ltc_new,
-	.mc = tu104_mc_new,
-	.mmu = tu104_mmu_new,
+	.mc = tu102_mc_new,
+	.mmu = tu102_mmu_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.therm = gp100_therm_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
-	.ce[0] = tu104_ce_new,
-	.ce[1] = tu104_ce_new,
-	.ce[2] = tu104_ce_new,
-	.ce[3] = tu104_ce_new,
-	.ce[4] = tu104_ce_new,
-	.disp = tu104_disp_new,
+	.ce[0] = tu102_ce_new,
+	.ce[1] = tu102_ce_new,
+	.ce[2] = tu102_ce_new,
+	.ce[3] = tu102_ce_new,
+	.ce[4] = tu102_ce_new,
+	.disp = tu102_disp_new,
 	.dma = gv100_dma_new,
-	.fifo = tu104_fifo_new,
+	.fifo = tu102_fifo_new,
+	.nvdec[0] = gp102_nvdec_new,
+	.sec2 = tu102_sec2_new,
 };
 
 static const struct nvkm_device_chip
 nv164_chipset = {
 	.name = "TU104",
-	.bar = tu104_bar_new,
+	.bar = tu102_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
-	.devinit = tu104_devinit_new,
-	.fault = tu104_fault_new,
+	.devinit = tu102_devinit_new,
+	.fault = tu102_fault_new,
 	.fb = gv100_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
+	.gsp = gv100_gsp_new,
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
 	.ltc = gp102_ltc_new,
-	.mc = tu104_mc_new,
-	.mmu = tu104_mmu_new,
+	.mc = tu102_mc_new,
+	.mmu = tu102_mmu_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.therm = gp100_therm_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
-	.ce[0] = tu104_ce_new,
-	.ce[1] = tu104_ce_new,
-	.ce[2] = tu104_ce_new,
-	.ce[3] = tu104_ce_new,
-	.ce[4] = tu104_ce_new,
-	.disp = tu104_disp_new,
+	.ce[0] = tu102_ce_new,
+	.ce[1] = tu102_ce_new,
+	.ce[2] = tu102_ce_new,
+	.ce[3] = tu102_ce_new,
+	.ce[4] = tu102_ce_new,
+	.disp = tu102_disp_new,
 	.dma = gv100_dma_new,
-	.fifo = tu104_fifo_new,
+	.fifo = tu102_fifo_new,
+	.nvdec[0] = gp102_nvdec_new,
+	.sec2 = tu102_sec2_new,
 };
 
 static const struct nvkm_device_chip
 nv166_chipset = {
 	.name = "TU106",
-	.bar = tu104_bar_new,
+	.bar = tu102_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
-	.devinit = tu104_devinit_new,
-	.fault = tu104_fault_new,
+	.devinit = tu102_devinit_new,
+	.fault = tu102_fault_new,
 	.fb = gv100_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
+	.gsp = gv100_gsp_new,
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
 	.ltc = gp102_ltc_new,
-	.mc = tu104_mc_new,
-	.mmu = tu104_mmu_new,
+	.mc = tu102_mc_new,
+	.mmu = tu102_mmu_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
 	.therm = gp100_therm_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
-	.ce[0] = tu104_ce_new,
-	.ce[1] = tu104_ce_new,
-	.ce[2] = tu104_ce_new,
-	.ce[3] = tu104_ce_new,
-	.ce[4] = tu104_ce_new,
-	.disp = tu104_disp_new,
+	.ce[0] = tu102_ce_new,
+	.ce[1] = tu102_ce_new,
+	.ce[2] = tu102_ce_new,
+	.ce[3] = tu102_ce_new,
+	.ce[4] = tu102_ce_new,
+	.disp = tu102_disp_new,
 	.dma = gv100_dma_new,
-	.fifo = tu104_fifo_new,
+	.fifo = tu102_fifo_new,
+	.nvdec[0] = gp102_nvdec_new,
+	.sec2 = tu102_sec2_new,
 };
 
 static int
@@ -2567,6 +2577,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
 	_(FB      , device->fb      , &device->fb->subdev);
 	_(FUSE    , device->fuse    , &device->fuse->subdev);
 	_(GPIO    , device->gpio    , &device->gpio->subdev);
+	_(GSP     , device->gsp     , &device->gsp->subdev);
 	_(I2C     , device->i2c     , &device->i2c->subdev);
 	_(IBUS    , device->ibus    ,  device->ibus);
 	_(ICCSENSE, device->iccsense, &device->iccsense->subdev);
@@ -3050,6 +3061,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		_(NVKM_SUBDEV_FB      ,       fb);
 		_(NVKM_SUBDEV_FUSE    ,     fuse);
 		_(NVKM_SUBDEV_GPIO    ,     gpio);
+		_(NVKM_SUBDEV_GSP     ,      gsp);
 		_(NVKM_SUBDEV_I2C     ,      i2c);
 		_(NVKM_SUBDEV_IBUS    ,     ibus);
 		_(NVKM_SUBDEV_ICCSENSE, iccsense);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 253ab914a8ef..2a53e37dfa7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -12,6 +12,7 @@
 #include <subdev/fb.h>
 #include <subdev/fuse.h>
 #include <subdev/gpio.h>
+#include <subdev/gsp.h>
 #include <subdev/i2c.h>
 #include <subdev/ibus.h>
 #include <subdev/iccsense.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 092ddc4ffefa..03c6d9aef075 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -365,16 +365,15 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
 	}
 
 	if (!sclass) {
-		switch (index) {
-		case 0: sclass = &nvkm_control_oclass; break;
-		case 1:
-			if (!device->mmu)
-				return -EINVAL;
+		if (index-- == 0)
+			sclass = &nvkm_control_oclass;
+		else if (device->mmu && index-- == 0)
 			sclass = &device->mmu->user;
-			break;
-		default:
+		else if (device->fault && index-- == 0)
+			sclass = &device->fault->user;
+		else
 			return -EINVAL;
-		}
+
 		oclass->base = sclass->base;
 	}
 
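The rewritten nvkm_udevice_child_get() above drops the fixed switch statement in favour of an index-decrement walk: each optional class (the MMU user class, and now the new FAULT user class) consumes an enumeration slot only when the corresponding subdev actually exists, so userspace can keep probing index 0, 1, 2, ... until it sees -EINVAL. A stand-alone sketch of that idiom follows; the names in it are purely illustrative and not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: each optional child consumes an index slot solely
 * when it is present, mirroring the index-- walk in the hunk above.
 */
struct fake_device {
	bool has_mmu;
	bool has_fault;
};

static const char *
child_class(const struct fake_device *dev, int index)
{
	if (index-- == 0)
		return "control";	/* always present */
	if (dev->has_mmu && index-- == 0)
		return "mmu-user";	/* only if an MMU exists */
	if (dev->has_fault && index-- == 0)
		return "fault-user";	/* new optional class in this series */
	return NULL;			/* -EINVAL in the kernel code */
}

int main(void)
{
	struct fake_device dev = { .has_mmu = true, .has_fault = false };
	const char *name;
	int i;

	for (i = 0; (name = child_class(&dev, i)); i++)
		printf("index %d -> %s\n", i, name);
	return 0;
}

The same pattern keeps the enumeration dense regardless of which optional subdevs a given chipset provides.
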
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index c6a257ba4347..2c28a5e747cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -15,7 +15,7 @@ nvkm-y += nvkm/engine/disp/gm200.o
 nvkm-y += nvkm/engine/disp/gp100.o
 nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
-nvkm-y += nvkm/engine/disp/tu104.o
+nvkm-y += nvkm/engine/disp/tu102.o
 nvkm-y += nvkm/engine/disp/vga.o
 
 nvkm-y += nvkm/engine/disp/head.o
@@ -39,7 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgk104.o
 nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/sorgv100.o
-nvkm-y += nvkm/engine/disp/sortu104.o
+nvkm-y += nvkm/engine/disp/sortu102.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/dp.o
@@ -71,7 +71,7 @@ nvkm-y += nvkm/engine/disp/rootgm200.o
 nvkm-y += nvkm/engine/disp/rootgp100.o
 nvkm-y += nvkm/engine/disp/rootgp102.o
 nvkm-y += nvkm/engine/disp/rootgv100.o
-nvkm-y += nvkm/engine/disp/roottu104.o
+nvkm-y += nvkm/engine/disp/roottu102.o
 
 nvkm-y += nvkm/engine/disp/channv50.o
 nvkm-y += nvkm/engine/disp/changf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 794e90982641..e675d9b9d5d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -91,15 +91,21 @@ gf119_disp_intr_error(struct nv50_disp *disp, int chid)
 {
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
+	u32 stat = nvkm_rd32(device, 0x6101f0 + (chid * 12));
+	u32 type = (stat & 0x00007000) >> 12;
+	u32 mthd = (stat & 0x00000ffc);
 	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
-	u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
+	u32 code = nvkm_rd32(device, 0x6101f8 + (chid * 12));
+	const struct nvkm_enum *reason =
+		nvkm_enum_find(nv50_disp_intr_error_type, type);
 
-	nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
-		   chid, (mthd & 0x0000ffc), data, mthd, unkn);
+	nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
+			   "data %08x code %08x\n",
+		   chid, stat, type, reason ? reason->name : "",
+		   mthd, data, code);
 
 	if (chid < ARRAY_SIZE(disp->chan)) {
-		switch (mthd & 0xffc) {
+		switch (mthd) {
 		case 0x0080:
 			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
 			break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 47be0ba4aebe..892be6c9b76c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -103,10 +103,13 @@ gv100_disp_exception(struct nv50_disp *disp, int chid)
 	u32 mthd = (stat & 0x00000fff) << 2;
 	u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
 	u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
+	const struct nvkm_enum *reason =
+		nvkm_enum_find(nv50_disp_intr_error_type, type);
 
-	nvkm_error(subdev, "chid %d %08x [type %d mthd %04x] "
+	nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
 			   "data %08x code %08x\n",
-		   chid, stat, type, mthd, data, code);
+		   chid, stat, type, reason ? reason->name : "",
+		   mthd, data, code);
 
 	if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
 		switch (mthd) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 790e42f460fd..1681ddccd298 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -201,5 +201,5 @@ int gm200_sor_new(struct nvkm_disp *, int);
 int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
 int gv100_sor_new(struct nvkm_disp *, int);
 
-int tu104_sor_new(struct nvkm_disp *, int);
+int tu102_sor_new(struct nvkm_disp *, int);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index def005dd5fda..e21556bf2cb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -28,7 +28,6 @@
 #include "rootnv50.h"
 
 #include <core/client.h>
-#include <core/enum.h>
 #include <core/ramht.h>
 #include <subdev/bios.h>
 #include <subdev/bios/disp.h>
@@ -593,12 +592,15 @@ nv50_disp_super(struct work_struct *work)
 	nvkm_wr32(device, 0x610030, 0x80000000);
 }
 
-static const struct nvkm_enum
+const struct nvkm_enum
 nv50_disp_intr_error_type[] = {
-	{ 3, "ILLEGAL_MTHD" },
-	{ 4, "INVALID_VALUE" },
+	{ 0, "NONE" },
+	{ 1, "PUSHBUFFER_ERR" },
+	{ 2, "TRAP" },
+	{ 3, "RESERVED_METHOD" },
+	{ 4, "INVALID_ARG" },
 	{ 5, "INVALID_STATE" },
-	{ 7, "INVALID_HANDLE" },
+	{ 7, "UNRESOLVABLE_HANDLE" },
 	{}
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index c36a8a7cafa1..e5d00f478bb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -5,6 +5,8 @@
 #include "priv.h"
 struct nvkm_head;
 
+#include <core/enum.h>
+
 struct nv50_disp {
 	const struct nv50_disp_func *func;
 	struct nvkm_disp base;
@@ -71,6 +73,7 @@ int nv50_disp_init(struct nv50_disp *);
 void nv50_disp_fini(struct nv50_disp *);
 void nv50_disp_intr(struct nv50_disp *);
 void nv50_disp_super(struct work_struct *);
+extern const struct nvkm_enum nv50_disp_intr_error_type[];
 
 int gf119_disp_init(struct nv50_disp *);
 void gf119_disp_fini(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 97de928cbde1..aee9822a7a87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -37,5 +37,5 @@ extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
 extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
-extern const struct nvkm_disp_oclass tu104_disp_root_oclass;
+extern const struct nvkm_disp_oclass tu102_disp_root_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
index ad438c62f66c..579a5d02308a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
@@ -25,28 +25,28 @@
 #include <nvif/class.h>
 
 static const struct nv50_disp_root_func
-tu104_disp_root = {
+tu102_disp_root = {
 	.user = {
-		{{0,0,TU104_DISP_CURSOR                }, gv100_disp_curs_new },
-		{{0,0,TU104_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
-		{{0,0,TU104_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
-		{{0,0,TU104_DISP_WINDOW_CHANNEL_DMA    }, gv100_disp_wndw_new },
+		{{0,0,TU102_DISP_CURSOR                }, gv100_disp_curs_new },
+		{{0,0,TU102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+		{{0,0,TU102_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
+		{{0,0,TU102_DISP_WINDOW_CHANNEL_DMA    }, gv100_disp_wndw_new },
 		{}
 	},
 };
 
 static int
-tu104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+tu102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
 		    void *data, u32 size, struct nvkm_object **pobject)
 {
-	return nv50_disp_root_new_(&tu104_disp_root, disp, oclass,
+	return nv50_disp_root_new_(&tu102_disp_root, disp, oclass,
 				   data, size, pobject);
 }
 
 const struct nvkm_disp_oclass
-tu104_disp_root_oclass = {
-	.base.oclass = TU104_DISP,
+tu102_disp_root_oclass = {
+	.base.oclass = TU102_DISP,
 	.base.minver = -1,
 	.base.maxver = -1,
-	.ctor = tu104_disp_root_new,
+	.ctor = tu102_disp_root_new,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index df026a525ef1..d57b73ada89e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -24,7 +24,7 @@
 #include <subdev/timer.h>
 
 static void
-tu104_sor_dp_vcpi(struct nvkm_ior *sor, int head,
+tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
 		  u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
 {
 	struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -35,7 +35,7 @@ tu104_sor_dp_vcpi(struct nvkm_ior *sor, int head,
 }
 
 static int
-tu104_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+tu102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
 {
 	struct nvkm_device *device = sor->disp->engine.subdev.device;
 	const u32 soff = nv50_ior_base(sor);
@@ -62,7 +62,7 @@ tu104_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
 }
 
 static const struct nvkm_ior_func
-tu104_sor = {
+tu102_sor = {
 	.route = {
 		.get = gm200_sor_route_get,
 		.set = gm200_sor_route_set,
@@ -75,11 +75,11 @@ tu104_sor = {
 	},
 	.dp = {
 		.lanes = { 0, 1, 2, 3 },
-		.links = tu104_sor_dp_links,
+		.links = tu102_sor_dp_links,
 		.power = g94_sor_dp_power,
 		.pattern = gm107_sor_dp_pattern,
 		.drive = gm200_sor_dp_drive,
-		.vcpi = tu104_sor_dp_vcpi,
+		.vcpi = tu102_sor_dp_vcpi,
 		.audio = gv100_sor_dp_audio,
 		.audio_sym = gv100_sor_dp_audio_sym,
 		.watermark = gv100_sor_dp_watermark,
@@ -91,7 +91,7 @@ tu104_sor = {
 };
 
 int
-tu104_sor_new(struct nvkm_disp *disp, int id)
+tu102_sor_new(struct nvkm_disp *disp, int id)
 {
-	return nvkm_ior_new_(&tu104_sor, disp, SOR, id);
+	return nvkm_ior_new_(&tu102_sor, disp, SOR, id);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 13fa21459d38..883ae4151ff8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -29,7 +29,7 @@
 #include <subdev/timer.h>
 
 static int
-tu104_disp_init(struct nv50_disp *disp)
+tu102_disp_init(struct nv50_disp *disp)
 {
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	struct nvkm_head *head;
@@ -132,21 +132,21 @@ tu104_disp_init(struct nv50_disp *disp)
 }
 
 static const struct nv50_disp_func
-tu104_disp = {
-	.init = tu104_disp_init,
+tu102_disp = {
+	.init = tu102_disp_init,
 	.fini = gv100_disp_fini,
 	.intr = gv100_disp_intr,
 	.uevent = &gv100_disp_chan_uevent,
 	.super = gv100_disp_super,
-	.root = &tu104_disp_root_oclass,
+	.root = &tu102_disp_root_oclass,
 	.wndw = { .cnt = gv100_disp_wndw_cnt },
 	.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
-	.sor = { .cnt = gv100_sor_cnt, .new = tu104_sor_new },
+	.sor = { .cnt = gv100_sor_cnt, .new = tu102_sor_new },
 	.ramht_size = 0x2000,
 };
 
 int
-tu104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+tu102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&tu104_disp, device, index, pdisp);
+	return nv50_disp_new_(&tu102_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 87d8e054e40a..05aada541ea5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -16,7 +16,7 @@ nvkm-y += nvkm/engine/fifo/gm20b.o
 nvkm-y += nvkm/engine/fifo/gp100.o
 nvkm-y += nvkm/engine/fifo/gp10b.o
 nvkm-y += nvkm/engine/fifo/gv100.o
-nvkm-y += nvkm/engine/fifo/tu104.o
+nvkm-y += nvkm/engine/fifo/tu102.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
@@ -34,7 +34,7 @@ nvkm-y += nvkm/engine/fifo/gpfifog84.o
 nvkm-y += nvkm/engine/fifo/gpfifogf100.o
 nvkm-y += nvkm/engine/fifo/gpfifogk104.o
 nvkm-y += nvkm/engine/fifo/gpfifogv100.o
-nvkm-y += nvkm/engine/fifo/gpfifotu104.o
+nvkm-y += nvkm/engine/fifo/gpfifotu102.o
 
 nvkm-y += nvkm/engine/fifo/usergv100.o
-nvkm-y += nvkm/engine/fifo/usertu104.o
+nvkm-y += nvkm/engine/fifo/usertu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index a14545d871d8..f8557cdfbd81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -47,6 +47,6 @@ int gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *,
 int gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *,
 				  struct nvkm_engine *, bool);
 
-int tu104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+int tu102_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
 			  void *data, u32 size, struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
index ff70484dd01a..abef7fb6e2d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
@@ -29,14 +29,14 @@
 #include <nvif/unpack.h>
 
 static u32
-tu104_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
+tu102_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
 	return (chan->runl << 16) | chan->base.chid;
 }
 
 static const struct nvkm_fifo_chan_func
-tu104_fifo_gpfifo = {
+tu102_fifo_gpfifo = {
 	.dtor = gk104_fifo_gpfifo_dtor,
 	.init = gk104_fifo_gpfifo_init,
 	.fini = gk104_fifo_gpfifo_fini,
@@ -45,11 +45,11 @@ tu104_fifo_gpfifo = {
 	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
 	.engine_init = gv100_fifo_gpfifo_engine_init,
 	.engine_fini = gv100_fifo_gpfifo_engine_fini,
-	.submit_token = tu104_fifo_gpfifo_submit_token,
+	.submit_token = tu102_fifo_gpfifo_submit_token,
 };
 
 int
-tu104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
 		      void *data, u32 size, struct nvkm_object **pobject)
 {
 	struct nvkm_object *parent = oclass->parent;
@@ -67,7 +67,7 @@ tu104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
 			   args->v0.ilength, args->v0.runlist, args->v0.priv);
 		if (args->v0.priv && !oclass->client->super)
 			return -EINVAL;
-		return gv100_fifo_gpfifo_new_(&tu104_fifo_gpfifo, fifo,
+		return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
 					      &args->v0.runlist,
 					      &args->v0.chid,
 					       args->v0.vmm,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 98c80705bc61..005f3e1729b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -29,7 +29,7 @@
 #include <nvif/class.h>
 
 static void
-tu104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+tu102_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
 			  struct nvkm_memory *mem, int nr)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
@@ -44,15 +44,15 @@ tu104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
 }
 
 const struct gk104_fifo_runlist_func
-tu104_fifo_runlist = {
+tu102_fifo_runlist = {
 	.size = 16,
 	.cgrp = gv100_fifo_runlist_cgrp,
 	.chan = gv100_fifo_runlist_chan,
-	.commit = tu104_fifo_runlist_commit,
+	.commit = tu102_fifo_runlist_commit,
 };
 
 static const struct nvkm_enum
-tu104_fifo_fault_engine[] = {
+tu102_fifo_fault_engine[] = {
 	{ 0x01, "DISPLAY" },
 	{ 0x03, "PTP" },
 	{ 0x06, "PWR_PMU" },
@@ -80,7 +80,7 @@ tu104_fifo_fault_engine[] = {
 };
 
 static void
-tu104_fifo_pbdma_init(struct gk104_fifo *fifo)
+tu102_fifo_pbdma_init(struct gk104_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	const u32 mask = (1 << fifo->pbdma_nr) - 1;
@@ -89,28 +89,28 @@ tu104_fifo_pbdma_init(struct gk104_fifo *fifo)
 }
 
 static const struct gk104_fifo_pbdma_func
-tu104_fifo_pbdma = {
+tu102_fifo_pbdma = {
 	.nr = gm200_fifo_pbdma_nr,
-	.init = tu104_fifo_pbdma_init,
+	.init = tu102_fifo_pbdma_init,
 	.init_timeout = gk208_fifo_pbdma_init_timeout,
 };
 
 static const struct gk104_fifo_func
-tu104_fifo = {
-	.pbdma = &tu104_fifo_pbdma,
+tu102_fifo = {
+	.pbdma = &tu102_fifo_pbdma,
 	.fault.access = gv100_fifo_fault_access,
-	.fault.engine = tu104_fifo_fault_engine,
+	.fault.engine = tu102_fifo_fault_engine,
 	.fault.reason = gv100_fifo_fault_reason,
 	.fault.hubclient = gv100_fifo_fault_hubclient,
 	.fault.gpcclient = gv100_fifo_fault_gpcclient,
-	.runlist = &tu104_fifo_runlist,
-	.user = {{-1,-1,VOLTA_USERMODE_A       }, tu104_fifo_user_new   },
-	.chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu104_fifo_gpfifo_new },
+	.runlist = &tu102_fifo_runlist,
+	.user = {{-1,-1,VOLTA_USERMODE_A       }, tu102_fifo_user_new   },
+	.chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu102_fifo_gpfifo_new },
 	.cgrp_force = true,
 };
 
 int
-tu104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+tu102_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&tu104_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&tu102_fifo, device, index, 4096, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
index 14b0c6bde8eb..54a3a3092cc0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
@@ -3,6 +3,6 @@
 #include "priv.h"
 int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
 			struct nvkm_object **);
-int tu104_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+int tu102_fifo_user_new(const struct nvkm_oclass *, void *, u32,
 			struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c
index 8f98548a21f6..217268f8ccad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c
@@ -22,7 +22,7 @@
 #include "user.h"
 
 static int
-tu104_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+tu102_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
 		    enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	struct nvkm_device *device = object->engine->subdev.device;
@@ -33,13 +33,13 @@ tu104_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
 }
 
 static const struct nvkm_object_func
-tu104_fifo_user = {
-	.map = tu104_fifo_user_map,
+tu102_fifo_user = {
+	.map = tu102_fifo_user_map,
 };
 
 int
-tu104_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+tu102_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
 		    struct nvkm_object **pobject)
 {
-	return nvkm_object_new_(&tu104_fifo_user, oclass, argv, argc, pobject);
+	return nvkm_object_new_(&tu102_fifo_user, oclass, argv, argc, pobject);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index cd8cf6f7024c..d41fb94524e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -25,6 +25,33 @@
 
 #include <engine/fifo.h>
 
+u32
+nvkm_gr_ctxsw_inst(struct nvkm_device *device)
+{
+	struct nvkm_gr *gr = device->gr;
+	if (gr && gr->func->ctxsw.inst)
+		return gr->func->ctxsw.inst(gr);
+	return 0;
+}
+
+int
+nvkm_gr_ctxsw_resume(struct nvkm_device *device)
+{
+	struct nvkm_gr *gr = device->gr;
+	if (gr && gr->func->ctxsw.resume)
+		return gr->func->ctxsw.resume(gr);
+	return 0;
+}
+
+int
+nvkm_gr_ctxsw_pause(struct nvkm_device *device)
+{
+	struct nvkm_gr *gr = device->gr;
+	if (gr && gr->func->ctxsw.pause)
+		return gr->func->ctxsw.pause(gr);
+	return 0;
+}
+
 static bool
 nvkm_gr_chsw_load(struct nvkm_engine *engine)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index e813a3f8ea93..85f2d1e950e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1523,13 +1523,9 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	/* Make channel current. */
 	addr = nvkm_memory_addr(inst) >> 12;
 	if (gr->firmware) {
-		nvkm_wr32(device, 0x409840, 0x00000030);
-		nvkm_wr32(device, 0x409500, 0x80000000 | addr);
-		nvkm_wr32(device, 0x409504, 0x00000003);
-		nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x409800) & 0x00000010)
-				break;
-		);
+		ret = gf100_gr_fecs_bind_pointer(gr, 0x80000000 | addr);
+		if (ret)
+			goto done;
 
 		nvkm_kmap(data);
 		nvkm_wo32(data, 0x1c, 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 70d3d41e616c..81a13cf9a292 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -715,6 +715,211 @@ gf100_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+static u32
+gf100_gr_ctxsw_inst(struct nvkm_gr *gr)
+{
+	return nvkm_rd32(gr->engine.subdev.device, 0x409b00);
+}
+
+static int
+gf100_gr_fecs_ctrl_ctxsw(struct gf100_gr *gr, u32 mthd)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409804, 0xffffffff);
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, 0xffffffff);
+	nvkm_wr32(device, 0x409504, mthd);
+	nvkm_msec(device, 2000,
+		u32 stat = nvkm_rd32(device, 0x409804);
+		if (stat == 0x00000002)
+			return -EIO;
+		if (stat == 0x00000001)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+int
+gf100_gr_fecs_start_ctxsw(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	int ret = 0;
+
+	mutex_lock(&gr->fecs.mutex);
+	if (!--gr->fecs.disable) {
+		if (WARN_ON(ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x39)))
+			gr->fecs.disable++;
+	}
+	mutex_unlock(&gr->fecs.mutex);
+	return ret;
+}
+
+int
+gf100_gr_fecs_stop_ctxsw(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	int ret = 0;
+
+	mutex_lock(&gr->fecs.mutex);
+	if (!gr->fecs.disable++) {
+		if (WARN_ON(ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x38)))
+			gr->fecs.disable--;
+	}
+	mutex_unlock(&gr->fecs.mutex);
+	return ret;
+}
+
+int
+gf100_gr_fecs_bind_pointer(struct gf100_gr *gr, u32 inst)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409840, 0x00000030);
+	nvkm_wr32(device, 0x409500, inst);
+	nvkm_wr32(device, 0x409504, 0x00000003);
+	nvkm_msec(device, 2000,
+		u32 stat = nvkm_rd32(device, 0x409800);
+		if (stat & 0x00000020)
+			return -EIO;
+		if (stat & 0x00000010)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_set_reglist_virtual_address(struct gf100_gr *gr, u64 addr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409810, addr >> 8);
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000001);
+	nvkm_wr32(device, 0x409504, 0x00000032);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) == 0x00000001)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_set_reglist_bind_instance(struct gf100_gr *gr, u32 inst)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409810, inst);
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000001);
+	nvkm_wr32(device, 0x409504, 0x00000031);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) == 0x00000001)
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_reglist_image_size(struct gf100_gr *gr, u32 *psize)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409800, 0x00000000);
+	nvkm_wr32(device, 0x409500, 0x00000001);
+	nvkm_wr32(device, 0x409504, 0x00000030);
+	nvkm_msec(device, 2000,
+		if ((*psize = nvkm_rd32(device, 0x409800)))
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_elpg_bind(struct gf100_gr *gr)
+{
+	u32 size;
+	int ret;
+
+	ret = gf100_gr_fecs_discover_reglist_image_size(gr, &size);
+	if (ret)
+		return ret;
+
+	/*XXX: We need to allocate + map the above into PMU's inst block,
+	 *     which which means we probably need a proper PMU before we
+	 *     even bother.
+	 */
+
+	ret = gf100_gr_fecs_set_reglist_bind_instance(gr, 0);
+	if (ret)
+		return ret;
+
+	return gf100_gr_fecs_set_reglist_virtual_address(gr, 0);
+}
+
+static int
+gf100_gr_fecs_discover_pm_image_size(struct gf100_gr *gr, u32 *psize)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000025);
+	nvkm_msec(device, 2000,
+		if ((*psize = nvkm_rd32(device, 0x409800)))
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_zcull_image_size(struct gf100_gr *gr, u32 *psize)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000016);
+	nvkm_msec(device, 2000,
+		if ((*psize = nvkm_rd32(device, 0x409800)))
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_image_size(struct gf100_gr *gr, u32 *psize)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, 0x00000000);
+	nvkm_wr32(device, 0x409504, 0x00000010);
+	nvkm_msec(device, 2000,
+		if ((*psize = nvkm_rd32(device, 0x409800)))
+			return 0;
+	);
+
+	return -ETIMEDOUT;
+}
+
+static void
+gf100_gr_fecs_set_watchdog_timeout(struct gf100_gr *gr, u32 timeout)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, 0x409840, 0xffffffff);
+	nvkm_wr32(device, 0x409500, timeout);
+	nvkm_wr32(device, 0x409504, 0x00000021);
+}
+
 static bool
 gf100_gr_chsw_load(struct nvkm_gr *base)
 {
@@ -1487,6 +1692,7 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_secboot *sb = device->secboot;
 	u32 secboot_mask = 0;
+	int ret;
 
 	/* load fuc microcode */
 	nvkm_mc_unk260(device, 0);
@@ -1495,12 +1701,12 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
 		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
 	else
-		gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
+		gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d);
 
 	if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
 		secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
 	else
-		gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
+		gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad);
 
 	if (secboot_mask != 0) {
 		int ret = nvkm_secboot_reset(sb, secboot_mask);
@@ -1515,8 +1721,8 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x41a10c, 0x00000000);
 	nvkm_wr32(device, 0x40910c, 0x00000000);
 
-	nvkm_falcon_start(gr->gpccs);
-	nvkm_falcon_start(gr->fecs);
+	nvkm_falcon_start(gr->gpccs.falcon);
+	nvkm_falcon_start(gr->fecs.falcon);
 
 	if (nvkm_msec(device, 2000,
 		if (nvkm_rd32(device, 0x409800) & 0x00000001)
@@ -1524,72 +1730,36 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
 	) < 0)
 		return -EBUSY;
 
-	nvkm_wr32(device, 0x409840, 0xffffffff);
-	nvkm_wr32(device, 0x409500, 0x7fffffff);
-	nvkm_wr32(device, 0x409504, 0x00000021);
+	gf100_gr_fecs_set_watchdog_timeout(gr, 0x7fffffff);
 
-	nvkm_wr32(device, 0x409840, 0xffffffff);
-	nvkm_wr32(device, 0x409500, 0x00000000);
-	nvkm_wr32(device, 0x409504, 0x00000010);
-	if (nvkm_msec(device, 2000,
-		if ((gr->size = nvkm_rd32(device, 0x409800)))
-			break;
-	) < 0)
-		return -EBUSY;
-
-	nvkm_wr32(device, 0x409840, 0xffffffff);
-	nvkm_wr32(device, 0x409500, 0x00000000);
-	nvkm_wr32(device, 0x409504, 0x00000016);
-	if (nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x409800))
-			break;
-	) < 0)
-		return -EBUSY;
+	/* Determine how much memory is required to store main context image. */
+	ret = gf100_gr_fecs_discover_image_size(gr, &gr->size);
+	if (ret)
+		return ret;
 
-	nvkm_wr32(device, 0x409840, 0xffffffff);
-	nvkm_wr32(device, 0x409500, 0x00000000);
-	nvkm_wr32(device, 0x409504, 0x00000025);
-	if (nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x409800))
-			break;
-	) < 0)
-		return -EBUSY;
+	/* Determine how much memory is required to store ZCULL image. */
+	ret = gf100_gr_fecs_discover_zcull_image_size(gr, &gr->size_zcull);
+	if (ret)
+		return ret;
 
-	if (device->chipset >= 0xe0) {
-		nvkm_wr32(device, 0x409800, 0x00000000);
-		nvkm_wr32(device, 0x409500, 0x00000001);
-		nvkm_wr32(device, 0x409504, 0x00000030);
-		if (nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x409800))
-				break;
-		) < 0)
-			return -EBUSY;
-
-		nvkm_wr32(device, 0x409810, 0xb00095c8);
-		nvkm_wr32(device, 0x409800, 0x00000000);
-		nvkm_wr32(device, 0x409500, 0x00000001);
-		nvkm_wr32(device, 0x409504, 0x00000031);
-		if (nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x409800))
-				break;
-		) < 0)
-			return -EBUSY;
-
-		nvkm_wr32(device, 0x409810, 0x00080420);
-		nvkm_wr32(device, 0x409800, 0x00000000);
-		nvkm_wr32(device, 0x409500, 0x00000001);
-		nvkm_wr32(device, 0x409504, 0x00000032);
-		if (nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x409800))
-				break;
-		) < 0)
-			return -EBUSY;
+	/* Determine how much memory is required to store PerfMon image. */
+	ret = gf100_gr_fecs_discover_pm_image_size(gr, &gr->size_pm);
+	if (ret)
+		return ret;
 
-		nvkm_wr32(device, 0x409614, 0x00000070);
-		nvkm_wr32(device, 0x409614, 0x00000770);
-		nvkm_wr32(device, 0x40802c, 0x00000001);
+	/*XXX: We (likely) require PMU support to even bother with this.
+	 *
+	 *     Also, it seems like not all GPUs support ELPG.  Traces I
+	 *     have here show RM enabling it on Kepler/Turing, but none
+	 *     of the GPUs between those.  NVGPU decides this by PCIID.
+	 */
+	if (0) {
+		ret = gf100_gr_fecs_elpg_bind(gr);
+		if (ret)
+			return ret;
 	}
 
+	/* Generate golden context image. */
 	if (gr->data == NULL) {
 		int ret = gf100_grctx_generate(gr);
 		if (ret) {
@@ -1614,15 +1784,19 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
 
 	/* load HUB microcode */
 	nvkm_mc_unk260(device, 0);
-	nvkm_falcon_load_dmem(gr->fecs, gr->func->fecs.ucode->data.data, 0x0,
+	nvkm_falcon_load_dmem(gr->fecs.falcon,
+			      gr->func->fecs.ucode->data.data, 0x0,
 			      gr->func->fecs.ucode->data.size, 0);
-	nvkm_falcon_load_imem(gr->fecs, gr->func->fecs.ucode->code.data, 0x0,
+	nvkm_falcon_load_imem(gr->fecs.falcon,
+			      gr->func->fecs.ucode->code.data, 0x0,
 			      gr->func->fecs.ucode->code.size, 0, 0, false);
 
 	/* load GPC microcode */
-	nvkm_falcon_load_dmem(gr->gpccs, gr->func->gpccs.ucode->data.data, 0x0,
+	nvkm_falcon_load_dmem(gr->gpccs.falcon,
+			      gr->func->gpccs.ucode->data.data, 0x0,
 			      gr->func->gpccs.ucode->data.size, 0);
-	nvkm_falcon_load_imem(gr->gpccs, gr->func->gpccs.ucode->code.data, 0x0,
+	nvkm_falcon_load_imem(gr->gpccs.falcon,
+			      gr->func->gpccs.ucode->code.data, 0x0,
 			      gr->func->gpccs.ucode->code.size, 0, 0, false);
 	nvkm_mc_unk260(device, 1);
 
@@ -1769,11 +1943,13 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 	int i, j;
 	int ret;
 
-	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
+	ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon);
 	if (ret)
 		return ret;
 
-	ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+	mutex_init(&gr->fecs.mutex);
+
+	ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon);
 	if (ret)
 		return ret;
 
@@ -1816,11 +1992,11 @@ gf100_gr_init_(struct nvkm_gr *base)
 
 	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
 
-	ret = nvkm_falcon_get(gr->fecs, subdev);
+	ret = nvkm_falcon_get(gr->fecs.falcon, subdev);
 	if (ret)
 		return ret;
 
-	ret = nvkm_falcon_get(gr->gpccs, subdev);
+	ret = nvkm_falcon_get(gr->gpccs.falcon, subdev);
 	if (ret)
 		return ret;
 
@@ -1832,8 +2008,8 @@ gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
 {
 	struct gf100_gr *gr = gf100_gr(base);
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-	nvkm_falcon_put(gr->gpccs, subdev);
-	nvkm_falcon_put(gr->fecs, subdev);
+	nvkm_falcon_put(gr->gpccs.falcon, subdev);
+	nvkm_falcon_put(gr->fecs.falcon, subdev);
 	return 0;
 }
 
@@ -1859,8 +2035,8 @@ gf100_gr_dtor(struct nvkm_gr *base)
 		gr->func->dtor(gr);
 	kfree(gr->data);
 
-	nvkm_falcon_del(&gr->gpccs);
-	nvkm_falcon_del(&gr->fecs);
+	nvkm_falcon_del(&gr->gpccs.falcon);
+	nvkm_falcon_del(&gr->fecs.falcon);
 
 	gf100_gr_dtor_fw(&gr->fuc409c);
 	gf100_gr_dtor_fw(&gr->fuc409d);
@@ -1886,6 +2062,9 @@ gf100_gr_ = {
 	.chan_new = gf100_gr_chan_new,
 	.object_get = gf100_gr_object_get,
 	.chsw_load = gf100_gr_chsw_load,
+	.ctxsw.pause = gf100_gr_fecs_stop_ctxsw,
+	.ctxsw.resume = gf100_gr_fecs_start_ctxsw,
+	.ctxsw.inst = gf100_gr_ctxsw_inst,
 };
 
 int
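Among the gf100_gr_fecs_*() helpers added in the gf100.c hunks above, gf100_gr_fecs_stop_ctxsw() and gf100_gr_fecs_start_ctxsw() are nestable: a mutex-protected disable counter means only the first stop and the matching final start actually submit the 0x38/0x39 FECS methods, and a failed method rolls the counter back. A stand-alone sketch of that counting scheme follows; the names are user-space and hypothetical, and the simulated flag stands in for the real falcon method.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctxsw_state {
	pthread_mutex_t mutex;
	unsigned int disable;	/* number of outstanding pause requests */
	bool hw_stopped;	/* stands in for the real FECS state */
};

static int hw_ctrl(struct ctxsw_state *s, bool stop)
{
	s->hw_stopped = stop;	/* real code: FECS method 0x38 / 0x39 */
	return 0;		/* or an error on a failed handshake */
}

static int ctxsw_pause(struct ctxsw_state *s)
{
	int ret = 0;

	pthread_mutex_lock(&s->mutex);
	if (!s->disable++) {		/* first pause does the work */
		ret = hw_ctrl(s, true);
		if (ret)
			s->disable--;	/* roll back on failure */
	}
	pthread_mutex_unlock(&s->mutex);
	return ret;
}

static int ctxsw_resume(struct ctxsw_state *s)
{
	int ret = 0;

	pthread_mutex_lock(&s->mutex);
	if (!--s->disable) {		/* last resume does the work */
		ret = hw_ctrl(s, false);
		if (ret)
			s->disable++;
	}
	pthread_mutex_unlock(&s->mutex);
	return ret;
}

int main(void)
{
	struct ctxsw_state s = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	ctxsw_pause(&s);	/* first caller: hardware pauses */
	ctxsw_pause(&s);	/* nested caller: counter only */
	ctxsw_resume(&s);	/* still paused */
	ctxsw_resume(&s);	/* last caller: hardware resumes */
	printf("stopped=%d disable=%u\n", s.hw_stopped, s.disable);
	return 0;
}

The nvkm_gr_ctxsw_pause()/nvkm_gr_ctxsw_resume() wrappers added in gr/base.c simply forward to these helpers when the hardware provides them.
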
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index dc46cf0131db..fafdd0bbea9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -82,8 +82,16 @@ struct gf100_gr {
 	const struct gf100_gr_func *func;
 	struct nvkm_gr base;
 
-	struct nvkm_falcon *fecs;
-	struct nvkm_falcon *gpccs;
+	struct {
+		struct nvkm_falcon *falcon;
+		struct mutex mutex;
+		u32 disable;
+	} fecs;
+
+	struct {
+		struct nvkm_falcon *falcon;
+	} gpccs;
+
 	struct gf100_gr_fuc fuc409c;
 	struct gf100_gr_fuc fuc409d;
 	struct gf100_gr_fuc fuc41ac;
@@ -128,6 +136,8 @@ struct gf100_gr {
 	struct gf100_gr_mmio mmio_list[4096/8];
 	u32  size;
 	u32 *data;
+	u32 size_zcull;
+	u32 size_pm;
 };
 
 int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
@@ -136,6 +146,8 @@ int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
 		  int, struct nvkm_gr **);
 void *gf100_gr_dtor(struct nvkm_gr *);
 
+int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst);
+
 struct gf100_gr_func_zbc {
 	void (*clear_color)(struct gf100_gr *, int zbc);
 	void (*clear_depth)(struct gf100_gr *, int zbc);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index 66359c23cbce..d4d5601c51e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -27,6 +27,11 @@ struct nvkm_gr_func {
 	 */
 	u64 (*units)(struct nvkm_gr *);
 	bool (*chsw_load)(struct nvkm_gr *);
+	struct {
+		int (*pause)(struct nvkm_gr *);
+		int (*resume)(struct nvkm_gr *);
+		u32 (*inst)(struct nvkm_gr *);
+	} ctxsw;
 	struct nvkm_sclass sclass[];
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 4807021fd990..4a63581bdd5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -21,13 +21,21 @@
  */
 #include "priv.h"
 
+#include <subdev/top.h>
 #include <engine/falcon.h>
 
 static int
 nvkm_nvdec_oneinit(struct nvkm_engine *engine)
 {
 	struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-	return nvkm_falcon_v1_new(&nvdec->engine.subdev, "NVDEC", 0x84000,
+	struct nvkm_subdev *subdev = &nvdec->engine.subdev;
+
+	nvdec->addr = nvkm_top_addr(subdev->device, subdev->index);
+	if (!nvdec->addr)
+		return -EINVAL;
+
+	/*XXX: fix naming of this when adding support for multiple-NVDEC */
+	return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr,
 				  &nvdec->falcon);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
index 4b17254cfbd0..d9cdea7d9353 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -1,2 +1,3 @@
 nvkm-y += nvkm/engine/sec2/base.o
 nvkm-y += nvkm/engine/sec2/gp102.o
+nvkm-y += nvkm/engine/sec2/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index f865d2a3e184..1b49e5b6717f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -22,6 +22,7 @@
 #include "priv.h"
 
 #include <core/msgqueue.h>
+#include <subdev/top.h>
 #include <engine/falcon.h>
 
 static void *
@@ -39,18 +40,18 @@ nvkm_sec2_intr(struct nvkm_engine *engine)
 	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
 	struct nvkm_subdev *subdev = &engine->subdev;
 	struct nvkm_device *device = subdev->device;
-	u32 disp = nvkm_rd32(device, 0x8701c);
-	u32 intr = nvkm_rd32(device, 0x87008) & disp & ~(disp >> 16);
+	u32 disp = nvkm_rd32(device, sec2->addr + 0x01c);
+	u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16);
 
 	if (intr & 0x00000040) {
 		schedule_work(&sec2->work);
-		nvkm_wr32(device, 0x87004, 0x00000040);
+		nvkm_wr32(device, sec2->addr + 0x004, 0x00000040);
 		intr &= ~0x00000040;
 	}
 
 	if (intr) {
 		nvkm_error(subdev, "unhandled intr %08x\n", intr);
-		nvkm_wr32(device, 0x87004, intr);
+		nvkm_wr32(device, sec2->addr + 0x004, intr);
 
 	}
 }
@@ -74,8 +75,15 @@ static int
 nvkm_sec2_oneinit(struct nvkm_engine *engine)
 {
 	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
-	return nvkm_falcon_v1_new(&sec2->engine.subdev, "SEC2", 0x87000,
-				  &sec2->falcon);
+	struct nvkm_subdev *subdev = &sec2->engine.subdev;
+
+	if (!sec2->addr) {
+		sec2->addr = nvkm_top_addr(subdev->device, subdev->index);
+		if (WARN_ON(!sec2->addr))
+			return -EINVAL;
+	}
+
+	return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon);
 }
 
 static int
@@ -95,13 +103,14 @@ nvkm_sec2 = {
 };
 
 int
-nvkm_sec2_new_(struct nvkm_device *device, int index,
+nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr,
 	       struct nvkm_sec2 **psec2)
 {
 	struct nvkm_sec2 *sec2;
 
 	if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
 		return -ENOMEM;
+	sec2->addr = addr;
 	INIT_WORK(&sec2->work, nvkm_sec2_recv);
 
 	return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index 9be1524c08f5..858cf27fa010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -26,5 +26,5 @@ int
 gp102_sec2_new(struct nvkm_device *device, int index,
 	       struct nvkm_sec2 **psec2)
 {
-	return nvkm_sec2_new_(device, index, psec2);
+	return nvkm_sec2_new_(device, index, 0, psec2);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index 2f97c806a79d..ab0165e2d1a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -5,6 +5,5 @@
 
 #define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
 
-int nvkm_sec2_new_(struct nvkm_device *, int, struct nvkm_sec2 **);
-
+int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
new file mode 100644
index 000000000000..d655576164b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+int
+tu102_sec2_new(struct nvkm_device *device, int index,
+	       struct nvkm_sec2 **psec2)
+{
+	/* TOP info wasn't updated on Turing to reflect the PRI
+	 * address change for some reason.  We override it here.
+	 */
+	return nvkm_sec2_new_(device, index, 0x840000, psec2);
+}
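
The NVDEC, SEC2 and GSP oneinit paths above share one pattern: take a constructor-time base-address override if one was given, otherwise look the falcon's PRI base up in the device topology table, and fail if neither is known. The standalone sketch below illustrates just that decision (it is not driver code; the lookup helper stands in for nvkm_top_addr(), which really takes a device and subdev index, and the addresses are only examples).

#include <stdint.h>
#include <stdio.h>

/* Stand-in for nvkm_top_addr(): returns the unit's PRI base from the
 * topology table, or 0 if the table has no (or a stale) entry.
 */
static uint32_t top_addr_lookup(int subdev_index)
{
	(void)subdev_index;
	return 0; /* pretend TOP is unhelpful, as for SEC2 on Turing */
}

/* Non-zero override wins; otherwise fall back to TOP; fail if both are 0. */
static int pick_falcon_base(uint32_t override, int subdev_index, uint32_t *base)
{
	*base = override ? override : top_addr_lookup(subdev_index);
	return *base ? 0 : -1;
}

int main(void)
{
	uint32_t base;

	/* gp102_sec2_new() passes 0: the address must come from TOP. */
	if (pick_falcon_base(0x000000, 0, &base))
		printf("GP102-style probe failed (no TOP entry)\n");

	/* tu102_sec2_new() passes 0x840000: TOP is ignored. */
	if (!pick_falcon_base(0x840000, 0, &base))
		printf("TU102-style probe -> base 0x%06x\n", (unsigned)base);
	return 0;
}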
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 427340153640..366c87de6e72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -204,6 +204,9 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
 		debug_reg = 0x408;
 		falcon->has_emem = true;
 		break;
+	case NVKM_SUBDEV_GSP:
+		debug_reg = 0x0; /*XXX*/
+		break;
 	default:
 		nvkm_warn(subdev, "unsupported falcon %s!\n",
 			  nvkm_subdev_name[subdev->index]);
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 771e16a16267..a8bee1e046aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -269,7 +269,7 @@ cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
 		commit = false;
 	}
 
-	   cmd_queue_close(priv, queue, commit);
+	cmd_queue_close(priv, queue, commit);
 
 	return ret;
 }
@@ -347,7 +347,7 @@ nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
 	ret = cmd_write(priv, cmd, queue);
 	if (ret) {
 		seq->state = SEQ_STATE_PENDING;
-		      msgqueue_seq_release(priv, seq);
+		msgqueue_seq_release(priv, seq);
 	}
 
 	return ret;
@@ -373,7 +373,7 @@ msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
 	if (seq->completion)
 		complete(seq->completion);
 
-	   msgqueue_seq_release(priv, seq);
+	msgqueue_seq_release(priv, seq);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index cfdffef1afb9..a339fe03d423 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -7,6 +7,7 @@ include $(src)/nvkm/subdev/fault/Kbuild
 include $(src)/nvkm/subdev/fb/Kbuild
 include $(src)/nvkm/subdev/fuse/Kbuild
 include $(src)/nvkm/subdev/gpio/Kbuild
+include $(src)/nvkm/subdev/gsp/Kbuild
 include $(src)/nvkm/subdev/i2c/Kbuild
 include $(src)/nvkm/subdev/ibus/Kbuild
 include $(src)/nvkm/subdev/iccsense/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index ab0282dc0736..dc300600c019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -5,4 +5,4 @@ nvkm-y += nvkm/subdev/bar/gf100.o
 nvkm-y += nvkm/subdev/bar/gk20a.o
 nvkm-y += nvkm/subdev/bar/gm107.o
 nvkm-y += nvkm/subdev/bar/gm20b.o
-nvkm-y += nvkm/subdev/bar/tu104.o
+nvkm-y += nvkm/subdev/bar/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
index ecaead156e9b..798f65ec3a86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
@@ -25,7 +25,7 @@
 #include <subdev/timer.h>
 
 static void
-tu104_bar_bar2_wait(struct nvkm_bar *bar)
+tu102_bar_bar2_wait(struct nvkm_bar *bar)
 {
 	struct nvkm_device *device = bar->subdev.device;
 	nvkm_msec(device, 2000,
@@ -35,13 +35,13 @@ tu104_bar_bar2_wait(struct nvkm_bar *bar)
 }
 
 static void
-tu104_bar_bar2_fini(struct nvkm_bar *bar)
+tu102_bar_bar2_fini(struct nvkm_bar *bar)
 {
 	nvkm_mask(bar->subdev.device, 0xb80f48, 0x80000000, 0x00000000);
 }
 
 static void
-tu104_bar_bar2_init(struct nvkm_bar *base)
+tu102_bar_bar2_init(struct nvkm_bar *base)
 {
 	struct nvkm_device *device = base->subdev.device;
 	struct gf100_bar *bar = gf100_bar(base);
@@ -52,7 +52,7 @@ tu104_bar_bar2_init(struct nvkm_bar *base)
 }
 
 static void
-tu104_bar_bar1_wait(struct nvkm_bar *bar)
+tu102_bar_bar1_wait(struct nvkm_bar *bar)
 {
 	struct nvkm_device *device = bar->subdev.device;
 	nvkm_msec(device, 2000,
@@ -62,13 +62,13 @@ tu104_bar_bar1_wait(struct nvkm_bar *bar)
 }
 
 static void
-tu104_bar_bar1_fini(struct nvkm_bar *bar)
+tu102_bar_bar1_fini(struct nvkm_bar *bar)
 {
 	nvkm_mask(bar->subdev.device, 0xb80f40, 0x80000000, 0x00000000);
 }
 
 static void
-tu104_bar_bar1_init(struct nvkm_bar *base)
+tu102_bar_bar1_init(struct nvkm_bar *base)
 {
 	struct nvkm_device *device = base->subdev.device;
 	struct gf100_bar *bar = gf100_bar(base);
@@ -77,22 +77,22 @@ tu104_bar_bar1_init(struct nvkm_bar *base)
 }
 
 static const struct nvkm_bar_func
-tu104_bar = {
+tu102_bar = {
 	.dtor = gf100_bar_dtor,
 	.oneinit = gf100_bar_oneinit,
-	.bar1.init = tu104_bar_bar1_init,
-	.bar1.fini = tu104_bar_bar1_fini,
-	.bar1.wait = tu104_bar_bar1_wait,
+	.bar1.init = tu102_bar_bar1_init,
+	.bar1.fini = tu102_bar_bar1_fini,
+	.bar1.wait = tu102_bar_bar1_wait,
 	.bar1.vmm = gf100_bar_bar1_vmm,
-	.bar2.init = tu104_bar_bar2_init,
-	.bar2.fini = tu104_bar_bar2_fini,
-	.bar2.wait = tu104_bar_bar2_wait,
+	.bar2.init = tu102_bar_bar2_init,
+	.bar2.fini = tu102_bar_bar2_fini,
+	.bar2.wait = tu102_bar_bar2_wait,
 	.bar2.vmm = gf100_bar_bar2_vmm,
 	.flush = g84_bar_flush,
 };
 
 int
-tu104_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+tu102_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
 {
-	return gf100_bar_new_(&tu104_bar, device, index, pbar);
+	return gf100_bar_new_(&tu102_bar, device, index, pbar);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 3133b28f849c..b099d1209be8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -212,7 +212,7 @@ nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
 	u16 data;
 
 	if (*ver >= 0x30) {
-		const u8 vsoff[] = { 0, 4, 7, 9 };
+		static const u8 vsoff[] = { 0, 4, 7, 9 };
 		idx = (pc * 10) + vsoff[vs] + pe;
 		if (*ver >= 0x40 && *ver <= 0x41 && *hdr >= 0x12)
 			idx += nvbios_rd08(bios, outp + 0x11) * 40;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index 9cc10e438b3d..ec0e9f7224b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -806,12 +806,12 @@ init_generic_condition(struct nvbios_init *init)
 	init->offset += 3;
 
 	switch (cond) {
-	case 0:
+	case 0: /* CONDITION_ID_INT_DP. */
 		if (init_conn(init) != DCB_CONNECTOR_eDP)
 			init_exec_set(init, false);
 		break;
-	case 1:
-	case 2:
+	case 1: /* CONDITION_ID_USE_SPPLL0. */
+	case 2: /* CONDITION_ID_USE_SPPLL1. */
 		if ( init->outp &&
 		    (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
 					       (init->outp->or << 0) |
@@ -826,10 +826,13 @@ init_generic_condition(struct nvbios_init *init)
 		if (init_exec(init))
 			warn("script needs dp output table data\n");
 		break;
-	case 5:
+	case 5: /* CONDITION_ID_ASSR_SUPPORT. */
 		if (!(init_rdauxr(init, 0x0d) & 1))
 			init_exec_set(init, false);
 		break;
+	case 7: /* CONDITION_ID_NO_PANEL_SEQ_DELAYS. */
+		init_exec_set(init, false);
+		break;
 	default:
 		warn("INIT_GENERIC_CONDITON: unknown 0x%02x\n", cond);
 		init->offset += size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index 3ef505a5c01b..f3c388932b6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -13,4 +13,4 @@ nvkm-y += nvkm/subdev/devinit/gf100.o
 nvkm-y += nvkm/subdev/devinit/gm107.o
 nvkm-y += nvkm/subdev/devinit/gm200.o
 nvkm-y += nvkm/subdev/devinit/gv100.o
-nvkm-y += nvkm/subdev/devinit/tu104.o
+nvkm-y += nvkm/subdev/devinit/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index aae87b3fc429..397670e72fff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -26,7 +26,7 @@
 #include <subdev/clk/pll.h>
 
 static int
-tu104_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
 	struct nvkm_subdev *subdev = &init->subdev;
 	struct nvkm_device *device = subdev->device;
@@ -66,7 +66,7 @@ tu104_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 }
 
 static int
-tu104_devinit_post(struct nvkm_devinit *base, bool post)
+tu102_devinit_post(struct nvkm_devinit *base, bool post)
 {
 	struct nv50_devinit *init = nv50_devinit(base);
 	gm200_devinit_preos(init, post);
@@ -74,16 +74,16 @@ tu104_devinit_post(struct nvkm_devinit *base, bool post)
 }
 
 static const struct nvkm_devinit_func
-tu104_devinit = {
+tu102_devinit = {
 	.init = nv50_devinit_init,
-	.post = tu104_devinit_post,
-	.pll_set = tu104_devinit_pll_set,
+	.post = tu102_devinit_post,
+	.pll_set = tu102_devinit_pll_set,
 	.disable = gm107_devinit_disable,
 };
 
 int
-tu104_devinit_new(struct nvkm_device *device, int index,
+tu102_devinit_new(struct nvkm_device *device, int index,
 		struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&tu104_devinit, device, index, pinit);
+	return nv50_devinit_new_(&tu102_devinit, device, index, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 794eb1745b2f..42586267fc08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -1,4 +1,5 @@
 nvkm-y += nvkm/subdev/fault/base.o
+nvkm-y += nvkm/subdev/fault/user.o
 nvkm-y += nvkm/subdev/fault/gp100.o
 nvkm-y += nvkm/subdev/fault/gv100.o
-nvkm-y += nvkm/subdev/fault/tu104.o
+nvkm-y += nvkm/subdev/fault/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index 4ba1e21e8fda..ca251560d3e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -176,5 +176,7 @@ nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
 		return -ENOMEM;
 	nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
 	fault->func = func;
+	fault->user.ctor = nvkm_ufault_new;
+	fault->user.base = func->user.base;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 8fb96fe614f9..4f3c4e091117 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -23,6 +23,8 @@
 
 #include <subdev/mc.h>
 
+#include <nvif/class.h>
+
 static void
 gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 {
@@ -69,6 +71,7 @@ gp100_fault = {
 	.buffer.init = gp100_fault_buffer_init,
 	.buffer.fini = gp100_fault_buffer_fini,
 	.buffer.intr = gp100_fault_buffer_intr,
+	.user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 6fc54e17c935..6747f09c2dc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -25,6 +25,8 @@
 #include <subdev/mmu.h>
 #include <engine/fifo.h>
 
+#include <nvif/class.h>
+
 static void
 gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
 {
@@ -166,6 +168,13 @@ gv100_fault_intr(struct nvkm_fault *fault)
 		}
 	}
 
+	if (stat & 0x08000000) {
+		if (fault->buffer[1]) {
+			nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+			stat &= ~0x08000000;
+		}
+	}
+
 	if (stat) {
 		nvkm_debug(subdev, "intr %08x\n", stat);
 	}
@@ -208,6 +217,13 @@ gv100_fault = {
 	.buffer.init = gv100_fault_buffer_init,
 	.buffer.fini = gv100_fault_buffer_fini,
 	.buffer.intr = gv100_fault_buffer_intr,
+	/*TODO: Figure out how to expose non-replayable fault buffer, which,
+	 *      for some reason, is where recoverable CE faults appear...
+	 *
+	 * 	It's a bit tricky, as both NVKM and SVM will need access to
+	 * 	the non-replayable fault buffer.
+	 */
+	.user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 8ca8b2876dad..975e66ac6344 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -34,7 +34,14 @@ struct nvkm_fault_func {
 		void (*fini)(struct nvkm_fault_buffer *);
 		void (*intr)(struct nvkm_fault_buffer *, bool enable);
 	} buffer;
+	struct {
+		struct nvkm_sclass base;
+		int rp;
+	} user;
 };
 
 int gv100_fault_oneinit(struct nvkm_fault *);
+
+int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *,
+		    void *, u32, struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index 9c8a3adf99d7..fa1dfe5692b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -28,7 +28,7 @@
 #include <nvif/class.h>
 
 static void
-tu104_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 {
 	/*XXX: Earlier versions of RM touched the old regs on Turing,
 	 *     which don't appear to actually work anymore, but newer
@@ -37,7 +37,7 @@ tu104_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 }
 
 static void
-tu104_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+tu102_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	const u32 foff = buffer->id * 0x20;
@@ -45,7 +45,7 @@ tu104_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
 }
 
 static void
-tu104_fault_buffer_init(struct nvkm_fault_buffer *buffer)
+tu102_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	const u32 foff = buffer->id * 0x20;
@@ -57,7 +57,7 @@ tu104_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 }
 
 static void
-tu104_fault_buffer_info(struct nvkm_fault_buffer *buffer)
+tu102_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 {
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	const u32 foff = buffer->id * 0x20;
@@ -70,7 +70,7 @@ tu104_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 }
 
 static void
-tu104_fault_intr_fault(struct nvkm_fault *fault)
+tu102_fault_intr_fault(struct nvkm_fault *fault)
 {
 	struct nvkm_subdev *subdev = &fault->subdev;
 	struct nvkm_device *device = subdev->device;
@@ -96,14 +96,14 @@ tu104_fault_intr_fault(struct nvkm_fault *fault)
 }
 
 static void
-tu104_fault_intr(struct nvkm_fault *fault)
+tu102_fault_intr(struct nvkm_fault *fault)
 {
 	struct nvkm_subdev *subdev = &fault->subdev;
 	struct nvkm_device *device = subdev->device;
 	u32 stat = nvkm_rd32(device, 0xb83094);
 
 	if (stat & 0x80000000) {
-		tu104_fault_intr_fault(fault);
+		tu102_fault_intr_fault(fault);
 		nvkm_wr32(device, 0xb83094, 0x80000000);
 		stat &= ~0x80000000;
 	}
@@ -129,7 +129,7 @@ tu104_fault_intr(struct nvkm_fault *fault)
 }
 
 static void
-tu104_fault_fini(struct nvkm_fault *fault)
+tu102_fault_fini(struct nvkm_fault *fault)
 {
 	nvkm_notify_put(&fault->nrpfb);
 	if (fault->buffer[0])
@@ -138,7 +138,7 @@ tu104_fault_fini(struct nvkm_fault *fault)
 }
 
 static void
-tu104_fault_init(struct nvkm_fault *fault)
+tu102_fault_init(struct nvkm_fault *fault)
 {
 	/*XXX: enable priv faults */
 	fault->func->buffer.init(fault->buffer[0]);
@@ -146,22 +146,23 @@ tu104_fault_init(struct nvkm_fault *fault)
 }
 
 static const struct nvkm_fault_func
-tu104_fault = {
+tu102_fault = {
 	.oneinit = gv100_fault_oneinit,
-	.init = tu104_fault_init,
-	.fini = tu104_fault_fini,
-	.intr = tu104_fault_intr,
+	.init = tu102_fault_init,
+	.fini = tu102_fault_fini,
+	.intr = tu102_fault_intr,
 	.buffer.nr = 2,
 	.buffer.entry_size = 32,
-	.buffer.info = tu104_fault_buffer_info,
-	.buffer.init = tu104_fault_buffer_init,
-	.buffer.fini = tu104_fault_buffer_fini,
-	.buffer.intr = tu104_fault_buffer_intr,
+	.buffer.info = tu102_fault_buffer_info,
+	.buffer.init = tu102_fault_buffer_init,
+	.buffer.fini = tu102_fault_buffer_fini,
+	.buffer.intr = tu102_fault_buffer_intr,
+	.user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
 };
 
 int
-tu104_fault_new(struct nvkm_device *device, int index,
+tu102_fault_new(struct nvkm_device *device, int index,
 		struct nvkm_fault **pfault)
 {
-	return nvkm_fault_new_(&tu104_fault, device, index, pfault);
+	return nvkm_fault_new_(&tu102_fault, device, index, pfault);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
new file mode 100644
index 000000000000..ac835c9582fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+#include <nvif/clb069.h>
+#include <nvif/unpack.h>
+
+static int
+nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc,
+		enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	*type = NVKM_OBJECT_MAP_IO;
+	*addr = device->func->resource_addr(device, 3) + buffer->addr;
+	*size = nvkm_memory_size(buffer->mem);
+	return 0;
+}
+
+static int
+nvkm_ufault_ntfy(struct nvkm_object *object, u32 type,
+		 struct nvkm_event **pevent)
+{
+	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+	if (type == NVB069_V0_NTFY_FAULT) {
+		*pevent = &buffer->fault->event;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_ufault_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+	buffer->fault->func->buffer.fini(buffer);
+	return 0;
+}
+
+static int
+nvkm_ufault_init(struct nvkm_object *object)
+{
+	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+	buffer->fault->func->buffer.init(buffer);
+	return 0;
+}
+
+static void *
+nvkm_ufault_dtor(struct nvkm_object *object)
+{
+	return NULL;
+}
+
+static const struct nvkm_object_func
+nvkm_ufault = {
+	.dtor = nvkm_ufault_dtor,
+	.init = nvkm_ufault_init,
+	.fini = nvkm_ufault_fini,
+	.ntfy = nvkm_ufault_ntfy,
+	.map = nvkm_ufault_map,
+};
+
+int
+nvkm_ufault_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+		void *argv, u32 argc, struct nvkm_object **pobject)
+{
+	union {
+		struct nvif_clb069_v0 v0;
+	} *args = argv;
+	struct nvkm_fault *fault = device->fault;
+	struct nvkm_fault_buffer *buffer = fault->buffer[fault->func->user.rp];
+	int ret = -ENOSYS;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		args->v0.entries = buffer->entries;
+		args->v0.get = buffer->get;
+		args->v0.put = buffer->put;
+	} else
+		return ret;
+
+	nvkm_object_ctor(&nvkm_ufault, oclass, &buffer->object);
+	*pobject = &buffer->object;
+	return 0;
+}
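
nvkm_ufault_new() above hands the client the buffer's entry count plus the locations of its GET and PUT pointers, and nvkm_ufault_map()/nvkm_ufault_ntfy() give it the mapped record area and a fault-pending event. The sketch below shows the ring arithmetic a consumer of this class would plausibly use (a toy register file replaces the PRI accesses the real consumer performs through its NVIF device object; record decoding is elided).

#include <stdint.h>
#include <stdio.h>

/* Toy "register file" standing in for the PRI space. */
static uint32_t pri[2] = { 0, 5 };        /* [0] = GET, [1] = PUT */
#define GETADDR 0                         /* what args->v0.get would carry */
#define PUTADDR 1                         /* what args->v0.put would carry */
#define ENTRIES 32                        /* what args->v0.entries would carry */

static uint32_t rd32(uint32_t addr) { return pri[addr]; }
static void wr32(uint32_t addr, uint32_t data) { pri[addr] = data; }

/* Drain pending fault records, wrapping at ENTRIES, then publish GET. */
static void fault_ring_drain(void)
{
	uint32_t get = rd32(GETADDR);
	uint32_t put = rd32(PUTADDR);

	while (get != put) {
		printf("handle fault record %u\n", (unsigned)get);
		if (++get == ENTRIES)
			get = 0;
	}
	wr32(GETADDR, get);
}

int main(void) { fault_ring_drain(); return 0; }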
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 60ece0a8a2e1..1d2d6bae73cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -87,7 +87,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
 		WR  = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
 		/* XXX: Get these values from the VBIOS instead */
 		DLL = !(ram->mr[1] & 0x1);
-		RON = !(ram->mr[1] & 0x300) >> 8;
+		RON = !((ram->mr[1] & 0x300) >> 8);
 		break;
 	default:
 		return -ENOSYS;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
new file mode 100644
index 000000000000..26fc6feb807e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -0,0 +1 @@
+nvkm-y += nvkm/subdev/gsp/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
new file mode 100644
index 000000000000..dccfaf1162e2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/gsp.h>
+#include <subdev/top.h>
+#include <engine/falcon.h>
+
+static int
+gv100_gsp_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+	gsp->addr = nvkm_top_addr(subdev->device, subdev->index);
+	if (!gsp->addr)
+		return -EINVAL;
+
+	return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon);
+}
+
+static void *
+gv100_gsp_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+	nvkm_falcon_del(&gsp->falcon);
+	return gsp;
+}
+
+static const struct nvkm_subdev_func
+gv100_gsp = {
+	.dtor = gv100_gsp_dtor,
+	.oneinit = gv100_gsp_oneinit,
+};
+
+int
+gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
+{
+	struct nvkm_gsp *gsp;
+
+	if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index f3b06329c338..c64e399326b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -12,4 +12,4 @@ nvkm-y += nvkm/subdev/mc/gk104.o
 nvkm-y += nvkm/subdev/mc/gk20a.o
 nvkm-y += nvkm/subdev/mc/gp100.o
 nvkm-y += nvkm/subdev/mc/gp10b.o
-nvkm-y += nvkm/subdev/mc/tu104.o
+nvkm-y += nvkm/subdev/mc/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index b7165bd18999..d098c44a4fcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -22,7 +22,7 @@
 #include "priv.h"
 
 static void
-tu104_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+tu102_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
 {
 	struct nvkm_device *device = mc->subdev.device;
 	u32 stat = nvkm_rd32(device, 0xb81010);
@@ -37,19 +37,19 @@ tu104_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
 }
 
 static const struct nvkm_mc_func
-tu104_mc = {
+tu102_mc = {
 	.init = nv50_mc_init,
 	.intr = gp100_mc_intr,
 	.intr_unarm = gp100_mc_intr_unarm,
 	.intr_rearm = gp100_mc_intr_rearm,
 	.intr_mask = gp100_mc_intr_mask,
 	.intr_stat = gf100_mc_intr_stat,
-	.intr_hack = tu104_mc_intr_hack,
+	.intr_hack = tu102_mc_intr_hack,
 	.reset = gk104_mc_reset,
 };
 
 int
-tu104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
 {
-	return gp100_mc_new_(&tu104_mc, device, index, pmc);
+	return gp100_mc_new_(&tu102_mc, device, index, pmc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 8966180b36cc..db9c56028f21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -13,7 +13,7 @@ nvkm-y += nvkm/subdev/mmu/gm20b.o
 nvkm-y += nvkm/subdev/mmu/gp100.o
 nvkm-y += nvkm/subdev/mmu/gp10b.o
 nvkm-y += nvkm/subdev/mmu/gv100.o
-nvkm-y += nvkm/subdev/mmu/tu104.o
+nvkm-y += nvkm/subdev/mmu/tu102.o
 
 nvkm-y += nvkm/subdev/mmu/mem.o
 nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -34,7 +34,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
 nvkm-y += nvkm/subdev/mmu/vmmgp100.o
 nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
 nvkm-y += nvkm/subdev/mmu/vmmgv100.o
-nvkm-y += nvkm/subdev/mmu/vmmtu104.o
+nvkm-y += nvkm/subdev/mmu/vmmtu102.o
 
 nvkm-y += nvkm/subdev/mmu/umem.o
 nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
index 651b8805c67c..65cb9d28e60e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -31,7 +31,7 @@ gp100_mmu = {
 	.dma_bits = 47,
 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
 	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
-	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
 	.kind = gm200_mmu_kind,
 	.kind_sys = true,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
index 3bd3db31e0bb..0a50be9a785a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -31,7 +31,7 @@ gp10b_mmu = {
 	.dma_bits = 47,
 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
 	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
-	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
 	.kind = gm200_mmu_kind,
 	.kind_sys = true,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
index f666cb57f69e..e0997eedd6d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
@@ -31,7 +31,7 @@ gv100_mmu = {
 	.dma_bits = 47,
 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
 	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
-	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
 	.kind = gm200_mmu_kind,
 	.kind_sys = true,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 948a48c21be4..2ad1102a4e31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -28,7 +28,7 @@ struct nvkm_mmu_func {
 
 	struct {
 		struct nvkm_sclass user;
-		int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size,
+		int (*ctor)(struct nvkm_mmu *, bool managed, u64 addr, u64 size,
 			    void *argv, u32 argc, struct lock_class_key *,
 			    const char *name, struct nvkm_vmm **);
 		bool global;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index 8e6f4096170d..c0db0ce10cba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -27,17 +27,17 @@
 #include <nvif/class.h>
 
 static const struct nvkm_mmu_func
-tu104_mmu = {
+tu102_mmu = {
 	.dma_bits = 47,
 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
 	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
-	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, tu104_vmm_new },
+	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
 	.kind = gm200_mmu_kind,
 	.kind_sys = true,
 };
 
 int
-tu104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+tu102_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
 {
-	return nvkm_mmu_new_(&tu104_mmu, device, index, pmmu);
+	return nvkm_mmu_new_(&tu102_mmu, device, index, pmmu);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 6889076097ec..c43b8248c682 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -43,6 +43,69 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
 }
 
 static int
+nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+	struct nvkm_client *client = uvmm->object.client;
+	union {
+		struct nvif_vmm_pfnclr_v0 v0;
+	} *args = argv;
+	struct nvkm_vmm *vmm = uvmm->vmm;
+	int ret = -ENOSYS;
+	u64 addr, size;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		addr = args->v0.addr;
+		size = args->v0.size;
+	} else
+		return ret;
+
+	if (!client->super)
+		return -ENOENT;
+
+	if (size) {
+		mutex_lock(&vmm->mutex);
+		ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
+		mutex_unlock(&vmm->mutex);
+	}
+
+	return ret;
+}
+
+static int
+nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+	struct nvkm_client *client = uvmm->object.client;
+	union {
+		struct nvif_vmm_pfnmap_v0 v0;
+	} *args = argv;
+	struct nvkm_vmm *vmm = uvmm->vmm;
+	int ret = -ENOSYS;
+	u64 addr, size, *phys;
+	u8  page;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+		page = args->v0.page;
+		addr = args->v0.addr;
+		size = args->v0.size;
+		phys = args->v0.phys;
+		if (argc != (size >> page) * sizeof(args->v0.phys[0]))
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!client->super)
+		return -ENOENT;
+
+	if (size) {
+		mutex_lock(&vmm->mutex);
+		ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
+		mutex_unlock(&vmm->mutex);
+	}
+
+	return ret;
+}
+
+static int
 nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
 	struct nvkm_client *client = uvmm->object.client;
@@ -78,7 +141,7 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 		goto done;
 	}
 
-	nvkm_vmm_unmap_locked(vmm, vma);
+	nvkm_vmm_unmap_locked(vmm, vma, false);
 	ret = 0;
 done:
 	mutex_unlock(&vmm->mutex);
@@ -124,6 +187,11 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 		goto fail;
 	}
 
+	if (ret = -EINVAL, vma->mapped && !vma->memory) {
+		VMM_DEBUG(vmm, "pfnmap %016llx", addr);
+		goto fail;
+	}
+
 	if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
 		if (addr + size > vma->addr + vma->size || vma->memory ||
 		    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
@@ -271,6 +339,15 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
 	case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
 	case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
 	case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
+	case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
+	case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
+	case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
+		if (uvmm->vmm->func->mthd) {
+			return uvmm->vmm->func->mthd(uvmm->vmm,
+						     uvmm->object.client,
+						     mthd, argv, argc);
+		}
+		break;
 	default:
 		break;
 	}
@@ -304,8 +381,10 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
 	struct nvkm_uvmm *uvmm;
 	int ret = -ENOSYS;
 	u64 addr, size;
+	bool managed;
 
 	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
+		managed = args->v0.managed != 0;
 		addr = args->v0.addr;
 		size = args->v0.size;
 	} else
@@ -317,7 +396,7 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
 	*pobject = &uvmm->object;
 
 	if (!mmu->vmm) {
-		ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc,
+		ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
 					  NULL, "user", &uvmm->vmm);
 		if (ret)
 			return ret;
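
nvkm_uvmm_mthd_pfnmap() above expects argc to equal (size >> page) * sizeof(u64), with each array element carrying a page-aligned address plus the NVKM_VMM_PFN_* bits defined further down in mmu/vmm.h. The standalone sketch below packs such entries the way the rest of this series appears to interpret them (address in bits [63:12], aperture/write/valid in the low bits); the helper name and the example addresses are made up.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the NVKM_VMM_PFN_* block added to mmu/vmm.h. */
#define PFN_ADDR 0xfffffffffffff000ULL
#define PFN_HOST 0x0000000000000000ULL
#define PFN_VRAM 0x0000000000000010ULL
#define PFN_W    0x0000000000000002ULL
#define PFN_V    0x0000000000000001ULL

/* Pack one page-aligned physical address into a PFNMAP array entry. */
static uint64_t pfn_entry(uint64_t phys, int vram, int writable)
{
	return (phys & PFN_ADDR) |
	       (vram ? PFN_VRAM : PFN_HOST) |
	       (writable ? PFN_W : 0) | PFN_V;
}

int main(void)
{
	/* Two 4KiB pages: the matching argc would be 2 * sizeof(u64). */
	uint64_t phys[2] = {
		pfn_entry(0x0000000123456000ULL, 0, 1), /* host page, RW */
		pfn_entry(0x0000000123457000ULL, 0, 0), /* host page, RO */
	};
	printf("%016llx %016llx\n",
	       (unsigned long long)phys[0], (unsigned long long)phys[1]);
	return 0;
}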
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 6b87fff014b3..fa93f964e6a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -255,11 +255,23 @@ nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
 }
 
 static bool
-nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
 	const struct nvkm_vmm_desc *desc = it->desc;
 	const int type = desc->type == SPT;
 	struct nvkm_vmm_pt *pgt = it->pt[0];
+	bool dma;
+
+	if (pfn) {
+		/* Need to clear PTE valid bits before we dma_unmap_page(). */
+		dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
+		if (dma) {
+			/* GPU may have cached the PT, flush before unmap. */
+			nvkm_vmm_flush_mark(it);
+			nvkm_vmm_flush(it);
+			desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
+		}
+	}
 
 	/* Drop PTE references. */
 	pgt->refs[type] -= ptes;
@@ -349,7 +361,7 @@ nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
 }
 
 static bool
-nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
 	const struct nvkm_vmm_desc *desc = it->desc;
 	const int type = desc->type == SPT;
@@ -379,7 +391,7 @@ nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
 }
 
 static bool
-nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
 	struct nvkm_vmm_pt *pt = it->pt[0];
 	if (it->desc->type == PGD)
@@ -387,14 +399,14 @@ nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
 	else
 	if (it->desc->type == LPT)
 		memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
-	return nvkm_vmm_unref_ptes(it, ptei, ptes);
+	return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
 }
 
 static bool
-nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
 	nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
-	return nvkm_vmm_ref_ptes(it, ptei, ptes);
+	return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
 }
 
 static bool
@@ -487,8 +499,8 @@ nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
 
 static inline u64
 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-	      u64 addr, u64 size, const char *name, bool ref,
-	      bool (*REF_PTES)(struct nvkm_vmm_iter *, u32, u32),
+	      u64 addr, u64 size, const char *name, bool ref, bool pfn,
+	      bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
 	      nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
 	      nvkm_vmm_pxe_func CLR_PTES)
 {
@@ -548,7 +560,7 @@ nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 		}
 
 		/* Handle PTE updates. */
-		if (!REF_PTES || REF_PTES(&it, ptei, ptes)) {
+		if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
 			struct nvkm_mmu_pt *pt = pgt->pt[type];
 			if (MAP_PTES || CLR_PTES) {
 				if (MAP_PTES)
@@ -590,7 +602,7 @@ static void
 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 			 u64 addr, u64 size)
 {
-	nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false,
+	nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
 		      nvkm_vmm_sparse_unref_ptes, NULL, NULL,
 		      page->desc->func->invalid ?
 		      page->desc->func->invalid : page->desc->func->unmap);
@@ -602,8 +614,8 @@ nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 {
 	if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
 		u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
-					 true, nvkm_vmm_sparse_ref_ptes, NULL,
-					 NULL, page->desc->func->sparse);
+					 true, false, nvkm_vmm_sparse_ref_ptes,
+					 NULL, NULL, page->desc->func->sparse);
 		if (fail != ~0ULL) {
 			if ((size = fail - addr))
 				nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
@@ -666,11 +678,11 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
 
 static void
 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-			u64 addr, u64 size, bool sparse)
+			u64 addr, u64 size, bool sparse, bool pfn)
 {
 	const struct nvkm_vmm_desc_func *func = page->desc->func;
 	nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
-		      false, nvkm_vmm_unref_ptes, NULL, NULL,
+		      false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
 		      sparse ? func->sparse : func->invalid ? func->invalid :
 							      func->unmap);
 }
@@ -681,10 +693,10 @@ nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 		      nvkm_vmm_pte_func func)
 {
 	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
-				 nvkm_vmm_ref_ptes, func, map, NULL);
+				 false, nvkm_vmm_ref_ptes, func, map, NULL);
 	if (fail != ~0ULL) {
 		if ((size = fail - addr))
-			nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false);
+			nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
 		return -ENOMEM;
 	}
 	return 0;
@@ -692,10 +704,11 @@ nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 
 static void
 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-		    u64 addr, u64 size, bool sparse)
+		    u64 addr, u64 size, bool sparse, bool pfn)
 {
 	const struct nvkm_vmm_desc_func *func = page->desc->func;
-	nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, NULL, NULL, NULL,
+	nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
+		      NULL, NULL, NULL,
 		      sparse ? func->sparse : func->invalid ? func->invalid :
 							      func->unmap);
 }
@@ -705,7 +718,7 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 		  u64 addr, u64 size, struct nvkm_vmm_map *map,
 		  nvkm_vmm_pte_func func)
 {
-	nvkm_vmm_iter(vmm, page, addr, size, "map", false,
+	nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
 		      NULL, func, map, NULL);
 }
 
@@ -713,7 +726,7 @@ static void
 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 		  u64 addr, u64 size)
 {
-	nvkm_vmm_iter(vmm, page, addr, size, "unref", false,
+	nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
 		      nvkm_vmm_unref_ptes, NULL, NULL, NULL);
 }
 
@@ -721,7 +734,7 @@ static int
 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 		  u64 addr, u64 size)
 {
-	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true,
+	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
 				 nvkm_vmm_ref_ptes, NULL, NULL, NULL);
 	if (fail != ~0ULL) {
 		if (fail != addr)
@@ -763,6 +776,7 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
 	new->part = vma->part;
 	new->user = vma->user;
 	new->busy = vma->busy;
+	new->mapped = vma->mapped;
 	list_add(&new->head, &vma->head);
 	return new;
 }
@@ -935,11 +949,40 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
 }
 
 static void
+nvkm_vma_dump(struct nvkm_vma *vma)
+{
+	printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+	       vma->addr, (u64)vma->size,
+	       vma->used ? '-' : 'F',
+	       vma->mapref ? 'R' : '-',
+	       vma->sparse ? 'S' : '-',
+	       vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
+	       vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
+	       vma->part ? 'P' : '-',
+	       vma->user ? 'U' : '-',
+	       vma->busy ? 'B' : '-',
+	       vma->mapped ? 'M' : '-',
+	       vma->memory);
+}
+
+static void
+nvkm_vmm_dump(struct nvkm_vmm *vmm)
+{
+	struct nvkm_vma *vma;
+	list_for_each_entry(vma, &vmm->list, head) {
+		nvkm_vma_dump(vma);
+	}
+}
+
+static void
 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 {
 	struct nvkm_vma *vma;
 	struct rb_node *node;
 
+	if (0)
+		nvkm_vmm_dump(vmm);
+
 	while ((node = rb_first(&vmm->root))) {
 		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
 		nvkm_vmm_put(vmm, &vma);
@@ -972,16 +1015,32 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 	}
 }
 
+static int
+nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
+{
+	struct nvkm_vma *vma;
+	if (!(vma = nvkm_vma_new(addr, size)))
+		return -ENOMEM;
+	vma->mapref = true;
+	vma->sparse = false;
+	vma->used = true;
+	vma->user = true;
+	nvkm_vmm_node_insert(vmm, vma);
+	list_add_tail(&vma->head, &vmm->list);
+	return 0;
+}
+
 int
 nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
-	      u32 pd_header, u64 addr, u64 size, struct lock_class_key *key,
-	      const char *name, struct nvkm_vmm *vmm)
+	      u32 pd_header, bool managed, u64 addr, u64 size,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm *vmm)
 {
 	static struct lock_class_key _key;
 	const struct nvkm_vmm_page *page = func->page;
 	const struct nvkm_vmm_desc *desc;
 	struct nvkm_vma *vma;
-	int levels, bits = 0;
+	int levels, bits = 0, ret;
 
 	vmm->func = func;
 	vmm->mmu = mmu;
@@ -1009,11 +1068,6 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 	if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
 		return -EINVAL;
 
-	vmm->start = addr;
-	vmm->limit = size ? (addr + size) : (1ULL << bits);
-	if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
-		return -EINVAL;
-
 	/* Allocate top-level page table. */
 	vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
 	if (!vmm->pd)
@@ -1036,50 +1090,273 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 	vmm->free = RB_ROOT;
 	vmm->root = RB_ROOT;
 
-	if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
-		return -ENOMEM;
+	if (managed) {
+		/* Address-space will be managed by the client for the most
+		 * part, except for a specified area where NVKM allocations
+		 * are allowed to be placed.
+		 */
+		vmm->start = 0;
+		vmm->limit = 1ULL << bits;
+		if (addr + size < addr || addr + size > vmm->limit)
+			return -EINVAL;
+
+		/* Client-managed area before the NVKM-managed area. */
+		if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
+			return ret;
+
+		/* NVKM-managed area. */
+		if (size) {
+			if (!(vma = nvkm_vma_new(addr, size)))
+				return -ENOMEM;
+			nvkm_vmm_free_insert(vmm, vma);
+			list_add_tail(&vma->head, &vmm->list);
+		}
+
+		/* Client-managed area after the NVKM-managed area. */
+		addr = addr + size;
+		size = vmm->limit - addr;
+		if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
+			return ret;
+	} else {
+		/* Address-space fully managed by NVKM, requiring calls to
+		 * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
+		 */
+		vmm->start = addr;
+		vmm->limit = size ? (addr + size) : (1ULL << bits);
+		if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
+			return -EINVAL;
+
+		if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
+			return -ENOMEM;
+
+		nvkm_vmm_free_insert(vmm, vma);
+		list_add(&vma->head, &vmm->list);
+	}
 
-	nvkm_vmm_free_insert(vmm, vma);
-	list_add(&vma->head, &vmm->list);
 	return 0;
 }
 
 int
 nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
-	      u32 hdr, u64 addr, u64 size, struct lock_class_key *key,
-	      const char *name, struct nvkm_vmm **pvmm)
+	      u32 hdr, bool managed, u64 addr, u64 size,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
 {
 	if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
+	return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
+}
+
+static struct nvkm_vma *
+nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+			 u64 addr, u64 size, u8 page, bool map)
+{
+	struct nvkm_vma *prev = NULL;
+	struct nvkm_vma *next = NULL;
+
+	if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
+		if (prev->memory || prev->mapped != map)
+			prev = NULL;
+	}
+
+	if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
+		if (!next->part ||
+		    next->memory || next->mapped != map)
+			next = NULL;
+	}
+
+	if (prev || next)
+		return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
+	return nvkm_vmm_node_split(vmm, vma, addr, size);
+}
+
+int
+nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
+{
+	struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
+	struct nvkm_vma *next;
+	u64 limit = addr + size;
+	u64 start = addr;
+
+	if (!vma)
+		return -EINVAL;
+
+	do {
+		if (!vma->mapped || vma->memory)
+			continue;
+
+		size = min(limit - start, vma->size - (start - vma->addr));
+
+		nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
+					start, size, false, true);
+
+		next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
+		if (!WARN_ON(!next)) {
+			vma = next;
+			vma->refd = NVKM_VMA_PAGE_NONE;
+			vma->mapped = false;
+		}
+	} while ((vma = node(vma, next)) && (start = vma->addr) < limit);
+
+	return 0;
+}
+
+/*TODO:
+ * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
+ *   with inside HMM, which would be a lot nicer for us to deal with.
+ * - Multiple page sizes (particularly for huge page support).
+ * - Support for systems without a 4KiB page size.
+ */
+int
+nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
+{
+	const struct nvkm_vmm_page *page = vmm->func->page;
+	struct nvkm_vma *vma, *tmp;
+	u64 limit = addr + size;
+	u64 start = addr;
+	int pm = size >> shift;
+	int pi = 0;
+
+	/* Only support mapping where the page size of the incoming page
+	 * array matches a page size available for direct mapping.
+	 */
+	while (page->shift && page->shift != shift &&
+	       page->desc->func->pfn == NULL)
+		page++;
+
+	if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
+			    !IS_ALIGNED(size, 1ULL << shift) ||
+	    addr + size < addr || addr + size > vmm->limit) {
+		VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
+			  shift, page->shift, addr, size);
+		return -EINVAL;
+	}
+
+	if (!(vma = nvkm_vmm_node_search(vmm, addr)))
+		return -ENOENT;
+
+	do {
+		bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
+		bool mapped = vma->mapped;
+		u64 size = limit - start;
+		u64 addr = start;
+		int pn, ret = 0;
+
+		/* Narrow the operation window to cover a single action (page
+		 * should be mapped or not) within a single VMA.
+		 */
+		for (pn = 0; pi + pn < pm; pn++) {
+			if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
+				break;
+		}
+		size = min_t(u64, size, pn << page->shift);
+		size = min_t(u64, size, vma->size + vma->addr - addr);
+
+		/* Reject any operation to unmanaged regions, and areas that
+		 * have nvkm_memory objects mapped in them already.
+		 */
+		if (!vma->mapref || vma->memory) {
+			ret = -EINVAL;
+			goto next;
+		}
+
+		/* In order to both properly refcount GPU page tables, and
+		 * prevent "normal" mappings and these direct mappings from
+		 * interfering with each other, we need to track contiguous
+		 * ranges that have been mapped with this interface.
+		 *
+		 * Here we attempt to either split an existing VMA so we're
+		 * able to flag the region as either unmapped/mapped, or to
+		 * merge with adjacent VMAs that are already compatible.
+		 *
+		 * If the region is already compatible, nothing is required.
+		 */
+		if (map != mapped) {
+			tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
+						       page -
+						       vmm->func->page, map);
+			if (WARN_ON(!tmp)) {
+				ret = -ENOMEM;
+				goto next;
+			}
+
+			if ((tmp->mapped = map))
+				tmp->refd = page - vmm->func->page;
+			else
+				tmp->refd = NVKM_VMA_PAGE_NONE;
+			vma = tmp;
+		}
+
+		/* Update HW page tables. */
+		if (map) {
+			struct nvkm_vmm_map args;
+			args.page = page;
+			args.pfn = &pfn[pi];
+
+			if (!mapped) {
+				ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
+							    size, &args, page->
+							    desc->func->pfn);
+			} else {
+				nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
+						  page->desc->func->pfn);
+			}
+		} else {
+			if (mapped) {
+				nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
+							false, true);
+			}
+		}
+
+next:
+		/* Iterate to next operation. */
+		if (vma->addr + vma->size == addr + size)
+			vma = node(vma, next);
+		start += size;
+
+		if (ret) {
+			/* Failure is signalled by clearing the valid bit on
+			 * any PFN that couldn't be modified as requested.
+			 */
+			while (size) {
+				pfn[pi++] = NVKM_VMM_PFN_NONE;
+				size -= 1 << page->shift;
+			}
+		} else {
+			pi += size >> page->shift;
+		}
+	} while (vma && start < limit);
+
+	return 0;
 }
 
 void
 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
-	struct nvkm_vma *next = node(vma, next);
 	struct nvkm_vma *prev = NULL;
+	struct nvkm_vma *next;
 
 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
 	nvkm_memory_unref(&vma->memory);
+	vma->mapped = false;
 
-	if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+	if (vma->part && (prev = node(vma, prev)) && prev->mapped)
 		prev = NULL;
-	if (!next->part || next->memory)
+	if ((next = node(vma, next)) && (!next->part || next->mapped))
 		next = NULL;
 	nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 }
 
 void
-nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
 {
 	const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
 
 	if (vma->mapref) {
-		nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse);
+		nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
 		vma->refd = NVKM_VMA_PAGE_NONE;
 	} else {
-		nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse);
+		nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
 	}
 
 	nvkm_vmm_unmap_region(vmm, vma);
@@ -1090,7 +1367,7 @@ nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
 	if (vma->memory) {
 		mutex_lock(&vmm->mutex);
-		nvkm_vmm_unmap_locked(vmm, vma);
+		nvkm_vmm_unmap_locked(vmm, vma, false);
 		mutex_unlock(&vmm->mutex);
 	}
 }
@@ -1224,6 +1501,7 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
 	nvkm_memory_unref(&vma->memory);
 	vma->memory = nvkm_memory_ref(map->memory);
+	vma->mapped = true;
 	vma->tags = map->tags;
 	return 0;
 }
@@ -1269,14 +1547,16 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 
 	if (vma->mapref || !vma->sparse) {
 		do {
-			const bool map = next->memory != NULL;
+			const bool mem = next->memory != NULL;
+			const bool map = next->mapped;
 			const u8  refd = next->refd;
 			const u64 addr = next->addr;
 			u64 size = next->size;
 
 			/* Merge regions that are in the same state. */
 			while ((next = node(next, next)) && next->part &&
-			       (next->memory != NULL) == map &&
+			       (next->mapped == map) &&
+			       (next->memory != NULL) == mem &&
 			       (next->refd == refd))
 				size += next->size;
 
@@ -1286,7 +1566,8 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 				 * the page tree.
 				 */
 				nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
-							size, vma->sparse);
+							size, vma->sparse,
+							!mem);
 			} else
 			if (refd != NVKM_VMA_PAGE_NONE) {
 				/* Drop allocation-time PTE references. */
@@ -1301,7 +1582,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	 */
 	next = vma;
 	do {
-		if (next->memory)
+		if (next->mapped)
 			nvkm_vmm_unmap_region(vmm, next);
 	} while ((next = node(vma, next)) && next->part);
 
@@ -1522,7 +1803,7 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 }
 
 static bool
-nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
 {
 	const struct nvkm_vmm_desc *desc = it->desc;
 	const int type = desc->type == SPT;
@@ -1544,7 +1825,7 @@ nvkm_vmm_boot(struct nvkm_vmm *vmm)
 	if (ret)
 		return ret;
 
-	nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false,
+	nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
 		      nvkm_vmm_boot_ptes, NULL, NULL, NULL);
 	vmm->bootstrapped = true;
 	return 0;
@@ -1584,7 +1865,8 @@ nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
 	struct nvkm_mmu *mmu = device->mmu;
 	struct nvkm_vmm *vmm = NULL;
 	int ret;
-	ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc, key, name, &vmm);
+	ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
+				  key, name, &vmm);
 	if (ret)
 		nvkm_vmm_unref(&vmm);
 	*pvmm = vmm;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 42ad326521a3..5e55ecbd8005 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -67,6 +67,10 @@ struct nvkm_vmm_desc_func {
 	nvkm_vmm_pte_func mem;
 	nvkm_vmm_pte_func dma;
 	nvkm_vmm_pte_func sgl;
+
+	nvkm_vmm_pte_func pfn;
+	bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
+	nvkm_vmm_pxe_func pfn_unmap;
 };
 
 extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
@@ -141,6 +145,11 @@ struct nvkm_vmm_func {
 		     struct nvkm_vmm_map *);
 	void (*flush)(struct nvkm_vmm *, int depth);
 
+	int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
+		    u32 mthd, void *argv, u32 argc);
+
+	void (*invalidate_pdb)(struct nvkm_vmm *, u64 addr);
+
 	u64 page_block;
 	const struct nvkm_vmm_page page[];
 };
@@ -151,11 +160,12 @@ struct nvkm_vmm_join {
 };
 
 int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
-		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
-		  const char *name, struct nvkm_vmm **);
+		  u32 pd_header, bool managed, u64 addr, u64 size,
+		  struct lock_class_key *, const char *name,
+		  struct nvkm_vmm **);
 int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
-		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
-		  const char *name, struct nvkm_vmm *);
+		  u32 pd_header, bool managed, u64 addr, u64 size,
+		  struct lock_class_key *, const char *name, struct nvkm_vmm *);
 struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
 struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
 				     u64 addr, u64 size);
@@ -163,13 +173,25 @@ int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
 			bool sparse, u8 page, u8 align, u64 size,
 			struct nvkm_vma **pvma);
 void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
-void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
-void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
+void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
+void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
+
+#define NVKM_VMM_PFN_ADDR                                 0xfffffffffffff000ULL
+#define NVKM_VMM_PFN_ADDR_SHIFT                                              12
+#define NVKM_VMM_PFN_APER                                 0x00000000000000f0ULL
+#define NVKM_VMM_PFN_HOST                                 0x0000000000000000ULL
+#define NVKM_VMM_PFN_VRAM                                 0x0000000000000010ULL
+#define NVKM_VMM_PFN_W                                    0x0000000000000002ULL
+#define NVKM_VMM_PFN_V                                    0x0000000000000001ULL
+#define NVKM_VMM_PFN_NONE                                 0x0000000000000000ULL
+
+int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
+int nvkm_vmm_pfn_unmap(struct nvkm_vmm *, u64 addr, u64 size);
 
 struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
 
 int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
-		  u64, u64, void *, u32, struct lock_class_key *,
+		  bool, u64, u64, void *, u32, struct lock_class_key *,
 		  const char *, struct nvkm_vmm **);
 int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
 
@@ -179,70 +201,76 @@ int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
 void nv50_vmm_flush(struct nvkm_vmm *, int);
 
 int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
-		   struct nvkm_mmu *, u64, u64, void *, u32,
+		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		   struct lock_class_key *, const char *, struct nvkm_vmm **);
 int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
 int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
 void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
 int gf100_vmm_aper(enum nvkm_memory_target);
 int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
-void gf100_vmm_flush_(struct nvkm_vmm *, int);
 void gf100_vmm_flush(struct nvkm_vmm *, int);
+void gf100_vmm_invalidate(struct nvkm_vmm *, u32 type);
+void gf100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
 
 int gk20a_vmm_aper(enum nvkm_memory_target);
 
 int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
-		   struct nvkm_mmu *, u64, u64, void *, u32,
+		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		   struct lock_class_key *, const char *, struct nvkm_vmm **);
 int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
 int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
 
+int gp100_vmm_new_(const struct nvkm_vmm_func *,
+		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
+		   struct lock_class_key *, const char *, struct nvkm_vmm **);
 int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
 int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
 void gp100_vmm_flush(struct nvkm_vmm *, int);
+int gp100_vmm_mthd(struct nvkm_vmm *, struct nvkm_client *, u32, void *, u32);
+void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
 
 int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
 
-int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		 struct lock_class_key *, const char *, struct nvkm_vmm **);
-int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		 struct lock_class_key *, const char *, struct nvkm_vmm **);
-int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int nv44_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		 struct lock_class_key *, const char *, struct nvkm_vmm **);
-int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int nv50_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		 struct lock_class_key *, const char *, struct nvkm_vmm **);
-int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int mcp77_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *, struct nvkm_vmm **);
-int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int g84_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		struct lock_class_key *, const char *, struct nvkm_vmm **);
-int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gf100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *, struct nvkm_vmm **);
-int gk104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gk104_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *, struct nvkm_vmm **);
-int gk20a_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gk20a_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *, struct nvkm_vmm **);
-int gm200_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+int gm200_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 			struct lock_class_key *, const char *,
 			struct nvkm_vmm **);
-int gm200_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gm200_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *,
 		  struct nvkm_vmm **);
-int gm20b_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+int gm20b_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 			struct lock_class_key *, const char *,
 			struct nvkm_vmm **);
-int gm20b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gm20b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *,
 		  struct nvkm_vmm **);
-int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gp100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *,
 		  struct nvkm_vmm **);
-int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gp10b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *,
 		  struct nvkm_vmm **);
-int gv100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *,
 		  struct nvkm_vmm **);
-int tu104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *,
 		  struct nvkm_vmm **);
 
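For reference, the NVKM_VMM_PFN_* bits added to vmm.h above describe the entries of the pfn array consumed by nvkm_vmm_pfn_map(): bits 63:12 carry the page address, bits 7:4 the aperture, bit 1 write permission and bit 0 validity. A minimal sketch of how a caller might encode one entry for a system-memory page follows; the helper name is hypothetical and not part of this patch.

	/* Hypothetical helper, not part of this patch: encodes one entry of
	 * the pfn array passed to nvkm_vmm_pfn_map(), using only the
	 * NVKM_VMM_PFN_* bits defined in vmm.h above.
	 */
	static inline u64
	example_pfn_entry(struct page *page, bool writable)
	{
		u64 pfn = (u64)page_to_pfn(page) << NVKM_VMM_PFN_ADDR_SHIFT;

		pfn |= NVKM_VMM_PFN_HOST;	/* system-memory aperture (0) */
		pfn |= NVKM_VMM_PFN_V;		/* entry is valid */
		if (writable)
			pfn |= NVKM_VMM_PFN_W;
		return pfn;
	}
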
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index faf5a7e9265e..ab6424faf84c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -178,15 +178,19 @@ gf100_vmm_desc_16_16[] = {
 };
 
 void
-gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
+gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
+{
+	struct nvkm_device *device = vmm->mmu->subdev.device;
+	nvkm_wr32(device, 0x100cb8, addr);
+}
+
+void
+gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
 {
 	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
 	struct nvkm_device *device = subdev->device;
-	u32 type = depth << 24;
-
-	type = 0x00000001; /* PAGE_ALL */
-	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
-		type |= 0x00000004; /* HUB_ONLY */
+	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
+	u64 addr = 0;
 
 	mutex_lock(&subdev->mutex);
 	/* Looks like maybe a "free flush slots" counter, the
@@ -197,7 +201,20 @@ gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
 			break;
 	);
 
-	nvkm_wr32(device, 0x100cb8, vmm->pd->pt[0]->addr >> 8);
+	if (!(type & 0x00000002) /* ALL_PDB. */) {
+		switch (nvkm_memory_target(pd->memory)) {
+		case NVKM_MEM_TARGET_VRAM: addr |= 0x00000000; break;
+		case NVKM_MEM_TARGET_HOST: addr |= 0x00000002; break;
+		case NVKM_MEM_TARGET_NCOH: addr |= 0x00000003; break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		addr |= (vmm->pd->pt[0]->addr >> 12) << 4;
+
+		vmm->func->invalidate_pdb(vmm, addr);
+	}
+
 	nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
 
 	/* Wait for flush to be queued? */
@@ -211,7 +228,10 @@ gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
 void
 gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
-	gf100_vmm_flush_(vmm, 0);
+	u32 type = 0x00000001; /* PAGE_ALL */
+	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+		type |= 0x00000004; /* HUB_ONLY */
+	gf100_vmm_invalidate(vmm, type);
 }
 
 int
@@ -354,6 +374,7 @@ gf100_vmm_17 = {
 	.aper = gf100_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
 		{ 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
@@ -368,6 +389,7 @@ gf100_vmm_16 = {
 	.aper = gf100_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
 		{ 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
@@ -378,14 +400,14 @@ gf100_vmm_16 = {
 int
 gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
 	       const struct nvkm_vmm_func *func_17,
-	       struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	       struct lock_class_key *key, const char *name,
-	       struct nvkm_vmm **pvmm)
+	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	       void *argv, u32 argc, struct lock_class_key *key,
+	       const char *name, struct nvkm_vmm **pvmm)
 {
 	switch (mmu->subdev.device->fb->page) {
-	case 16: return nv04_vmm_new_(func_16, mmu, 0, addr, size,
+	case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
 				      argv, argc, key, name, pvmm);
-	case 17: return nv04_vmm_new_(func_17, mmu, 0, addr, size,
+	case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
 				      argv, argc, key, name, pvmm);
 	default:
 		WARN_ON(1);
@@ -394,10 +416,10 @@ gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
 }
 
 int
-gf100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, addr,
+	return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
 			      size, argv, argc, key, name, pvmm);
 }
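
For reference, the PDB address that gf100_vmm_invalidate() now programs packs the aperture target into the low nibble and the page-directory address, shifted right by 12 and left by 4, into the bits above it; for a page-aligned, VRAM-backed page directory this is bit-identical to the old "addr >> 8" write that gf100_vmm_flush_() used. A minimal sketch of that packing, assuming only the fields visible in the hunk above, is shown below; the function name is illustrative only.

	/* Hypothetical illustration of the PDB-address packing performed by
	 * gf100_vmm_invalidate(); the target values mirror its switch
	 * statement.
	 */
	static u64
	example_pdb_addr(u64 pd_addr, enum nvkm_memory_target target)
	{
		u64 addr = (pd_addr >> 12) << 4;	/* PD address in bits 31:4 */

		switch (target) {
		case NVKM_MEM_TARGET_VRAM: addr |= 0x0; break;	/* video memory */
		case NVKM_MEM_TARGET_HOST: addr |= 0x2; break;	/* coherent sysmem */
		case NVKM_MEM_TARGET_NCOH: addr |= 0x3; break;	/* non-coherent sysmem */
		default:
			break;
		}
		return addr;
	}
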
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
index 0ebb7bccfcd2..0b59c01fd146 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
@@ -71,6 +71,7 @@ gk104_vmm_17 = {
 	.aper = gf100_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
 		{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
@@ -85,6 +86,7 @@ gk104_vmm_16 = {
 	.aper = gf100_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
 		{ 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
@@ -93,10 +95,10 @@ gk104_vmm_16 = {
 };
 
 int
-gk104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+gk104_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, addr,
+	return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, managed, addr,
 			      size, argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
index 8086994a0446..5a9582dce970 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
@@ -40,6 +40,7 @@ gk20a_vmm_17 = {
 	.aper = gf100_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xxHC },
 		{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xxHx },
@@ -54,6 +55,7 @@ gk20a_vmm_16 = {
 	.aper = gf100_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xxHC },
 		{ 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xxHx },
@@ -62,10 +64,10 @@ gk20a_vmm_16 = {
 };
 
 int
-gk20a_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+gk20a_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, addr,
+	return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, managed, addr,
 			      size, argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
index a1676a4644fe..2e61af02d4d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
@@ -113,6 +113,7 @@ gm200_vmm_17 = {
 	.aper = gf100_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
 		{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
@@ -128,6 +129,7 @@ gm200_vmm_16 = {
 	.aper = gf100_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
 		{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
@@ -139,9 +141,9 @@ gm200_vmm_16 = {
 int
 gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
 	       const struct nvkm_vmm_func *func_17,
-	       struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	       struct lock_class_key *key, const char *name,
-	       struct nvkm_vmm **pvmm)
+	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	       void *argv, u32 argc, struct lock_class_key *key,
+	       const char *name, struct nvkm_vmm **pvmm)
 {
 	const struct nvkm_vmm_func *func;
 	union {
@@ -163,23 +165,23 @@ gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
 	} else
 		return ret;
 
-	return nvkm_vmm_new_(func, mmu, 0, addr, size, key, name, pvmm);
+	return nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
 }
 
 int
-gm200_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+	return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
 			      size, argv, argc, key, name, pvmm);
 }
 
 int
-gm200_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
 		    void *argv, u32 argc, struct lock_class_key *key,
 		    const char *name, struct nvkm_vmm **pvmm)
 {
-	return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+	return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
 			      size, argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
index 64d4b6cff8dd..96b759695dd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
@@ -28,6 +28,7 @@ gm20b_vmm_17 = {
 	.aper = gk20a_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
 		{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SxHC },
@@ -43,6 +44,7 @@ gm20b_vmm_16 = {
 	.aper = gk20a_vmm_aper,
 	.valid = gf100_vmm_valid,
 	.flush = gf100_vmm_flush,
+	.invalidate_pdb = gf100_vmm_invalidate_pdb,
 	.page = {
 		{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
 		{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SxHC },
@@ -52,19 +54,19 @@ gm20b_vmm_16 = {
 };
 
 int
-gm20b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+gm20b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+	return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
 			      size, argv, argc, key, name, pvmm);
 }
 
 int
-gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
 		    void *argv, u32 argc, struct lock_class_key *key,
 		    const char *name, struct nvkm_vmm **pvmm)
 {
-	return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+	return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
 			      size, argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index 059fafe0e771..b4f519768d5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -21,12 +21,90 @@
  */
 #include "vmm.h"
 
+#include <core/client.h>
 #include <subdev/fb.h>
 #include <subdev/ltc.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
 
 #include <nvif/ifc00d.h>
 #include <nvif/unpack.h>
 
+static void
+gp100_vmm_pfn_unmap(struct nvkm_vmm *vmm,
+		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	struct device *dev = vmm->mmu->subdev.device->dev;
+	dma_addr_t addr;
+
+	nvkm_kmap(pt->memory);
+	while (ptes--) {
+		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
+		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
+		u64 data   = (u64)datahi << 32 | datalo;
+		if ((data & (3ULL << 1)) != 0) {
+			addr = (data >> 8) << 12;
+			dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+		}
+		ptei++;
+	}
+	nvkm_done(pt->memory);
+}
+
+static bool
+gp100_vmm_pfn_clear(struct nvkm_vmm *vmm,
+		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	bool dma = false;
+	nvkm_kmap(pt->memory);
+	while (ptes--) {
+		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
+		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
+		u64 data   = (u64)datahi << 32 | datalo;
+		if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
+			VMM_WO064(pt, vmm, ptei * 8, data & ~BIT_ULL(0));
+			dma = true;
+		}
+		ptei++;
+	}
+	nvkm_done(pt->memory);
+	return dma;
+}
+
+static void
+gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	struct device *dev = vmm->mmu->subdev.device->dev;
+	dma_addr_t addr;
+
+	nvkm_kmap(pt->memory);
+	while (ptes--) {
+		u64 data = 0;
+		if (!(*map->pfn & NVKM_VMM_PFN_W))
+			data |= BIT_ULL(6); /* RO. */
+
+		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
+			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
+			addr = dma_map_page(dev, pfn_to_page(addr), 0,
+					    PAGE_SIZE, DMA_BIDIRECTIONAL);
+			if (!WARN_ON(dma_mapping_error(dev, addr))) {
+				data |= addr >> 4;
+				data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
+				data |= BIT_ULL(3); /* VOL. */
+				data |= BIT_ULL(0); /* VALID. */
+			}
+		} else {
+			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
+			data |= BIT_ULL(0); /* VALID. */
+		}
+
+		VMM_WO064(pt, vmm, ptei++ * 8, data);
+		map->pfn++;
+	}
+	nvkm_done(pt->memory);
+}
+
 static inline void
 gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
@@ -89,6 +167,9 @@ gp100_vmm_desc_spt = {
 	.mem = gp100_vmm_pgt_mem,
 	.dma = gp100_vmm_pgt_dma,
 	.sgl = gp100_vmm_pgt_sgl,
+	.pfn = gp100_vmm_pgt_pfn,
+	.pfn_clear = gp100_vmm_pfn_clear,
+	.pfn_unmap = gp100_vmm_pfn_unmap,
 };
 
 static void
@@ -306,16 +387,100 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
 	return 0;
 }
 
+static int
+gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
+{
+	struct nvkm_device *device = vmm->mmu->subdev.device;
+	union {
+		struct gp100_vmm_fault_cancel_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+	u32 inst, aper;
+
+	if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
+		return ret;
+
+	/* Translate MaxwellFaultBufferA instance pointer to the same
+	 * format as the NV_GR_FECS_CURRENT_CTX register.
+	 */
+	aper = (args->v0.inst >> 8) & 3;
+	args->v0.inst >>= 12;
+	args->v0.inst |= aper << 28;
+	args->v0.inst |= 0x80000000;
+
+	if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
+		if ((inst = nvkm_gr_ctxsw_inst(device)) == args->v0.inst) {
+			gf100_vmm_invalidate(vmm, 0x0000001b
+					     /* CANCEL_TARGETED. */ |
+					     (args->v0.hub    << 20) |
+					     (args->v0.gpc    << 15) |
+					     (args->v0.client << 9));
+		}
+		WARN_ON(nvkm_gr_ctxsw_resume(device));
+	}
+
+	return 0;
+}
+
+static int
+gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
+{
+	union {
+		struct gp100_vmm_fault_replay_vn vn;
+	} *args = argv;
+	int ret = -ENOSYS;
+
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
+	}
+
+	return ret;
+}
+
+int
+gp100_vmm_mthd(struct nvkm_vmm *vmm,
+	       struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
+{
+	if (client->super) {
+		switch (mthd) {
+		case GP100_VMM_VN_FAULT_REPLAY:
+			return gp100_vmm_fault_replay(vmm, argv, argc);
+		case GP100_VMM_VN_FAULT_CANCEL:
+			return gp100_vmm_fault_cancel(vmm, argv, argc);
+		default:
+			break;
+		}
+	}
+	return -EINVAL;
+}
+
+void
+gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
+{
+	struct nvkm_device *device = vmm->mmu->subdev.device;
+	nvkm_wr32(device, 0x100cb8, lower_32_bits(addr));
+	nvkm_wr32(device, 0x100cec, upper_32_bits(addr));
+}
+
 void
 gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
-	gf100_vmm_flush_(vmm, 5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth);
+	u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
+	type = 0; /*XXX: need to confirm stuff works with depth enabled... */
+	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+		type |= 0x00000004; /* HUB_ONLY */
+	type |= 0x00000001; /* PAGE_ALL */
+	gf100_vmm_invalidate(vmm, type);
 }
 
 int
 gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
-	const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
+	u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
+	if (vmm->replay) {
+		base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
+		base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
+	}
 	return gf100_vmm_join_(vmm, inst, base);
 }
 
@@ -326,6 +491,8 @@ gp100_vmm = {
 	.aper = gf100_vmm_aper,
 	.valid = gp100_vmm_valid,
 	.flush = gp100_vmm_flush,
+	.mthd = gp100_vmm_mthd,
+	.invalidate_pdb = gp100_vmm_invalidate_pdb,
 	.page = {
 		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
 		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
@@ -338,10 +505,39 @@ gp100_vmm = {
 };
 
 int
-gp100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+gp100_vmm_new_(const struct nvkm_vmm_func *func,
+	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	       void *argv, u32 argc, struct lock_class_key *key,
+	       const char *name, struct nvkm_vmm **pvmm)
+{
+	union {
+		struct gp100_vmm_vn vn;
+		struct gp100_vmm_v0 v0;
+	} *args = argv;
+	int ret = -ENOSYS;
+	bool replay;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		replay = args->v0.fault_replay != 0;
+	} else
+	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+		replay = false;
+	} else
+		return ret;
+
+	ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
+	if (ret)
+		return ret;
+
+	(*pvmm)->replay = replay;
+	return 0;
+}
+
+int
+gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&gp100_vmm, mmu, 0, addr, size,
-			     argv, argc, key, name, pvmm);
+	return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
+			      argv, argc, key, name, pvmm);
 }
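
For reference, the new gp100_vmm_new_() constructor above unpacks a GP100_VMM_V0 argument whose fault_replay flag marks the VMM as replayable, which in turn makes gp100_vmm_join() set the FAULT_REPLAY_TEX/GCC bits in the instance block. A minimal sketch of filling those creation arguments, assuming only the version and fault_replay fields referenced in this hunk, follows.

	/* Hypothetical argument setup for creating a replayable-fault VMM;
	 * only the fields used by gp100_vmm_new_() above are assumed.
	 */
	struct gp100_vmm_v0 args = {};

	args.version = 0;
	args.fault_replay = 1;	/* request replayable faults for this VMM */
	/* argv/argc pointing at &args would then reach gp100_vmm_new_()
	 * through mmu->func->vmm.ctor().
	 */
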
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
index 3dcc6bddb32f..e081239afe58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
@@ -28,6 +28,8 @@ gp10b_vmm = {
 	.aper = gk20a_vmm_aper,
 	.valid = gp100_vmm_valid,
 	.flush = gp100_vmm_flush,
+	.mthd = gp100_vmm_mthd,
+	.invalidate_pdb = gp100_vmm_invalidate_pdb,
 	.page = {
 		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
 		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
@@ -40,10 +42,10 @@ gp10b_vmm = {
 };
 
 int
-gp10b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+gp10b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&gp10b_vmm, mmu, 0, addr, size,
-			     argv, argc, key, name, pvmm);
+	return gp100_vmm_new_(&gp10b_vmm, mmu, managed, addr, size,
+			      argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
index 2fa40c16e6d2..f0e21f63253a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
@@ -66,6 +66,8 @@ gv100_vmm = {
 	.aper = gf100_vmm_aper,
 	.valid = gp100_vmm_valid,
 	.flush = gp100_vmm_flush,
+	.mthd = gp100_vmm_mthd,
+	.invalidate_pdb = gp100_vmm_invalidate_pdb,
 	.page = {
 		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
 		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
@@ -78,10 +80,10 @@ gv100_vmm = {
 };
 
 int
-gv100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&gv100_vmm, mmu, 0, addr, size,
-			     argv, argc, key, name, pvmm);
+	return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
+			      argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
index e63d984cbfd4..bdddd99f5877 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
@@ -36,10 +36,10 @@ mcp77_vmm = {
 };
 
 int
-mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+mcp77_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
+	return nv04_vmm_new_(&mcp77_vmm, mmu, 0, managed, addr, size,
 			     argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
index 0cab1ffc9f64..4c6b3b7d221f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
@@ -100,16 +100,17 @@ nv04_vmm = {
 
 int
 nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
-	      u32 pd_header, u64 addr, u64 size, void *argv, u32 argc,
-	      struct lock_class_key *key, const char *name,
-	      struct nvkm_vmm **pvmm)
+	      u32 pd_header, bool managed, u64 addr, u64 size,
+	      void *argv, u32 argc, struct lock_class_key *key,
+	      const char *name, struct nvkm_vmm **pvmm)
 {
 	union {
 		struct nv04_vmm_vn vn;
 	} *args = argv;
 	int ret;
 
-	ret = nvkm_vmm_new_(func, mmu, pd_header, addr, size, key, name, pvmm);
+	ret = nvkm_vmm_new_(func, mmu, pd_header, managed, addr, size,
+			    key, name, pvmm);
 	if (ret)
 		return ret;
 
@@ -117,15 +118,15 @@ nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 }
 
 int
-nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	     struct lock_class_key *key, const char *name,
+nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
 	     struct nvkm_vmm **pvmm)
 {
 	struct nvkm_memory *mem;
 	struct nvkm_vmm *vmm;
 	int ret;
 
-	ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size,
+	ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, managed, addr, size,
 			    argv, argc, key, name, &vmm);
 	*pvmm = vmm;
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
index b595f130e573..1d3369683a21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -104,10 +104,10 @@ nv41_vmm = {
 };
 
 int
-nv41_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	     struct lock_class_key *key, const char *name,
+nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
 	     struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&nv41_vmm, mmu, 0, addr, size,
+	return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
 			     argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
index b834e4352334..a82936ba9890 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
@@ -205,15 +205,15 @@ nv44_vmm = {
 };
 
 int
-nv44_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	     struct lock_class_key *key, const char *name,
+nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
 	     struct nvkm_vmm **pvmm)
 {
 	struct nvkm_subdev *subdev = &mmu->subdev;
 	struct nvkm_vmm *vmm;
 	int ret;
 
-	ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, addr, size,
+	ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
 			    argv, argc, key, name, &vmm);
 	*pvmm = vmm;
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 64f75d906202..c98afe3134ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -376,10 +376,10 @@ nv50_vmm = {
 };
 
 int
-nv50_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
-	     struct lock_class_key *key, const char *name,
+nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
 	     struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&nv50_vmm, mmu, 0, addr, size,
+	return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
 			     argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index adaadd92110f..be91cffc3b52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -24,7 +24,7 @@
 #include <subdev/timer.h>
 
 static void
-tu104_vmm_flush(struct nvkm_vmm *vmm, int depth)
+tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
 	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
 	struct nvkm_device *device = subdev->device;
@@ -50,12 +50,13 @@ tu104_vmm_flush(struct nvkm_vmm *vmm, int depth)
 }
 
 static const struct nvkm_vmm_func
-tu104_vmm = {
+tu102_vmm = {
 	.join = gv100_vmm_join,
 	.part = gf100_vmm_part,
 	.aper = gf100_vmm_aper,
 	.valid = gp100_vmm_valid,
-	.flush = tu104_vmm_flush,
+	.flush = tu102_vmm_flush,
+	.mthd = gp100_vmm_mthd,
 	.page = {
 		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
 		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
@@ -68,10 +69,10 @@ tu104_vmm = {
 };
 
 int
-tu104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size,
+tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
 	      void *argv, u32 argc, struct lock_class_key *key,
 	      const char *name, struct nvkm_vmm **pvmm)
 {
-	return nv04_vmm_new_(&tu104_vmm, mmu, 0, addr, size,
-			     argv, argc, key, name, pvmm);
+	return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,
+			      argv, argc, key, name, pvmm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index 11b28b086a06..7b052879af72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -88,10 +88,10 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
 	if (exec) {
 		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
 			      memx->base, finish);
+		nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+			   reply[0], reply[1]);
 	}
 
-	nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
-		   reply[0], reply[1]);
 	kfree(memx);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 5c14d6ac855d..1df09ed6fe6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -853,7 +853,7 @@ acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
 		 * and the expected behavior on RM as well
 		 */
 		if (ret && ret != 0x1d) {
-			nvkm_error(subdev, "HS unload failed, ret 0x%08x", ret);
+			nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
 			return -EINVAL;
 		}
 		nvkm_debug(subdev, "HS unload blob completed\n");
@@ -922,7 +922,7 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
 	if (ret < 0) {
 		return ret;
 	} else if (ret > 0) {
-		nvkm_error(subdev, "HS load failed, ret 0x%08x", ret);
+		nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
 		return -EINVAL;
 	}
 	nvkm_debug(subdev, "HS load blob completed\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index 67ada1d9a28c..cce6e4e90ebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -41,6 +41,22 @@ nvkm_top_device_new(struct nvkm_top *top)
 }
 
 u32
+nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
+{
+	struct nvkm_top *top = device->top;
+	struct nvkm_top_device *info;
+
+	if (top) {
+		list_for_each_entry(info, &top->device, head) {
+			if (info->index == index)
+				return info->addr;
+		}
+	}
+
+	return 0;
+}
+
+u32
 nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
 {
 	struct nvkm_top *top = device->top;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 39081eadfd84..e01746ce9fc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -73,6 +73,7 @@ gk104_top_oneinit(struct nvkm_top *top)
 #define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
 #define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1)      \
 		info->index = NVKM_ENGINE_##A##0 + inst
+#define C_(A) if (inst == 0) info->index = NVKM_SUBDEV_##A
 		switch (type) {
 		case 0x00000000: A_(GR    ); break;
 		case 0x00000001: A_(CE0   ); break;
@@ -88,6 +89,7 @@ gk104_top_oneinit(struct nvkm_top *top)
 		case 0x0000000f: A_(NVENC1); break;
 		case 0x00000010: B_(NVDEC ); break;
 		case 0x00000013: B_(CE    ); break;
+		case 0x00000014: C_(GSP   ); break;
 			break;
 		default:
 			break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index bcd179ba11d0..146adcdd316a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o
 nvkm-y += nvkm/subdev/volt/gpio.o
 nvkm-y += nvkm/subdev/volt/nv40.o
 nvkm-y += nvkm/subdev/volt/gf100.o
+nvkm-y += nvkm/subdev/volt/gf117.o
 nvkm-y += nvkm/subdev/volt/gk104.o
 nvkm-y += nvkm/subdev/volt/gk20a.o
 nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
new file mode 100644
index 000000000000..547a58f0aeac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <subdev/fuse.h>
+
+static int
+gf117_volt_speedo_read(struct nvkm_volt *volt)
+{
+	struct nvkm_device *device = volt->subdev.device;
+	struct nvkm_fuse *fuse = device->fuse;
+
+	if (!fuse)
+		return -EINVAL;
+
+	return nvkm_fuse_read(fuse, 0x3a8);
+}
+
+static const struct nvkm_volt_func
+gf117_volt = {
+	.oneinit = gf100_volt_oneinit,
+	.vid_get = nvkm_voltgpio_get,
+	.vid_set = nvkm_voltgpio_set,
+	.speedo_read = gf117_volt_speedo_read,
+};
+
+int
+gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+	struct nvkm_volt *volt;
+	int ret;
+
+	ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
+	*pvolt = volt;
+	if (ret)
+		return ret;
+
+	return nvkm_voltgpio_init(volt);
+}