 arch/x86/include/asm/iosf_mbi.h | 87
 arch/x86/platform/intel/iosf_mbi.c | 49
 drivers/gpu/drm/i915/Kconfig | 1
 drivers/gpu/drm/i915/Makefile | 2
 drivers/gpu/drm/i915/gvt/gvt.h | 2
 drivers/gpu/drm/i915/gvt/scheduler.c | 47
 drivers/gpu/drm/i915/i915_cmd_parser.c | 25
 drivers/gpu/drm/i915/i915_debugfs.c | 278
 drivers/gpu/drm/i915/i915_drv.c | 42
 drivers/gpu/drm/i915/i915_drv.h | 87
 drivers/gpu/drm/i915/i915_gem.c | 260
 drivers/gpu/drm/i915/i915_gem_batch_pool.c | 37
 drivers/gpu/drm/i915/i915_gem_context.c | 3
 drivers/gpu/drm/i915/i915_gem_context.h | 3
 drivers/gpu/drm/i915/i915_gem_evict.c | 8
 drivers/gpu/drm/i915/i915_gem_fence_reg.c | 11
 drivers/gpu/drm/i915/i915_gem_object.h | 9
 drivers/gpu/drm/i915/i915_gem_request.c | 2
 drivers/gpu/drm/i915/i915_gem_request.h | 5
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 2
 drivers/gpu/drm/i915/i915_gem_userptr.c | 75
 drivers/gpu/drm/i915/i915_guc_submission.c | 257
 drivers/gpu/drm/i915/i915_irq.c | 207
 drivers/gpu/drm/i915/i915_params.c | 10
 drivers/gpu/drm/i915/i915_params.h | 2
 drivers/gpu/drm/i915/i915_reg.h | 5
 drivers/gpu/drm/i915/i915_sysfs.c | 28
 drivers/gpu/drm/i915/i915_trace.h | 8
 drivers/gpu/drm/i915/i915_utils.h | 2
 drivers/gpu/drm/i915/i915_vgpu.c | 8
 drivers/gpu/drm/i915/intel_bios.c | 46
 drivers/gpu/drm/i915/intel_breadcrumbs.c | 101
 drivers/gpu/drm/i915/intel_cdclk.c | 6
 drivers/gpu/drm/i915/intel_color.c | 4
 drivers/gpu/drm/i915/intel_csr.c | 5
 drivers/gpu/drm/i915/intel_ddi.c | 70
 drivers/gpu/drm/i915/intel_device_info.c | 9
 drivers/gpu/drm/i915/intel_display.c | 635
 drivers/gpu/drm/i915/intel_drv.h | 29
 drivers/gpu/drm/i915/intel_dsi.c | 76
 drivers/gpu/drm/i915/intel_dsi.h | 14
 drivers/gpu/drm/i915/{intel_dsi_panel_vbt.c => intel_dsi_vbt.c} | 46
 drivers/gpu/drm/i915/intel_engine_cs.c | 17
 drivers/gpu/drm/i915/intel_fbc.c | 2
 drivers/gpu/drm/i915/intel_fbdev.c | 4
 drivers/gpu/drm/i915/intel_guc_loader.c | 447
 drivers/gpu/drm/i915/intel_hdmi.c | 24
 drivers/gpu/drm/i915/intel_hotplug.c | 28
 drivers/gpu/drm/i915/intel_huc.c | 113
 drivers/gpu/drm/i915/intel_lrc.c | 36
 drivers/gpu/drm/i915/intel_lrc.h | 1
 drivers/gpu/drm/i915/intel_opregion.c | 26
 drivers/gpu/drm/i915/intel_overlay.c | 8
 drivers/gpu/drm/i915/intel_pm.c | 133
 drivers/gpu/drm/i915/intel_ringbuffer.c | 16
 drivers/gpu/drm/i915/intel_ringbuffer.h | 11
 drivers/gpu/drm/i915/intel_sprite.c | 288
 drivers/gpu/drm/i915/intel_uc.c | 287
 drivers/gpu/drm/i915/intel_uc.h | 25
 drivers/gpu/drm/i915/intel_uncore.c | 140
 drivers/gpu/drm/i915/selftests/i915_gem_context.c | 6
 drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 90
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 22
 drivers/gpu/drm/i915/selftests/i915_selftest.c | 4
 drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 18
 drivers/i2c/busses/i2c-designware-baytrail.c | 83
 drivers/i2c/busses/i2c-designware-core.c | 14
 drivers/i2c/busses/i2c-designware-core.h | 13
 drivers/i2c/busses/i2c-designware-pcidrv.c | 26
 drivers/i2c/busses/i2c-designware-platdrv.c | 8
 include/drm/i915_pciids.h | 8
 kernel/locking/lockdep.c | 2
 72 files changed, 2661 insertions(+), 1842 deletions(-)
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index b41ee164930a..c313cac36f56 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -5,6 +5,8 @@
 #ifndef IOSF_MBI_SYMS_H
 #define IOSF_MBI_SYMS_H
 
+#include <linux/notifier.h>
+
 #define MBI_MCR_OFFSET		0xD0
 #define MBI_MDR_OFFSET		0xD4
 #define MBI_MCRX_OFFSET		0xD8
@@ -47,6 +49,10 @@
 #define QRK_MBI_UNIT_MM		0x05
 #define QRK_MBI_UNIT_SOC	0x31
 
+/* Action values for the pmic_bus_access_notifier functions */
+#define MBI_PMIC_BUS_ACCESS_BEGIN	1
+#define MBI_PMIC_BUS_ACCESS_END		2
+
 #if IS_ENABLED(CONFIG_IOSF_MBI)
 
 bool iosf_mbi_available(void);
@@ -88,6 +94,65 @@ int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
  */
 int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
 
+/**
+ * iosf_mbi_punit_acquire() - Acquire access to the P-Unit
+ *
+ * On some systems the P-Unit accesses the PMIC to change various voltages
+ * through the same bus as other kernel drivers use for e.g. battery monitoring.
+ *
+ * If a driver sends requests to the P-Unit which require the P-Unit to access
+ * the PMIC bus while another driver is also accessing the PMIC bus, various
+ * bad things happen.
+ *
+ * To avoid these problems this function must be called before accessing the
+ * P-Unit or the PMIC, be it through iosf_mbi* functions or through other means.
+ *
+ * Note that on these systems the i2c-bus driver will request a semaphore
+ * from the P-Unit for exclusive access to the PMIC bus when i2c drivers are
+ * accessing it, but this does not appear to be sufficient; we still need to
+ * avoid making certain P-Unit requests during the access window.
+ *
+ * This function locks a mutex, as such it may sleep.
+ */
+void iosf_mbi_punit_acquire(void);
+
+/**
+ * iosf_mbi_punit_release() - Release access to the P-Unit
+ */
+void iosf_mbi_punit_release(void);
+
+/**
+ * iosf_mbi_register_pmic_bus_access_notifier - Register PMIC bus notifier
+ *
+ * This function can be used by drivers which may need to acquire P-Unit
+ * managed resources from interrupt context, where iosf_mbi_punit_acquire()
+ * cannot be used.
+ *
+ * This function allows a driver to register a notifier to get notified (in a
+ * process context) before other drivers start accessing the PMIC bus.
+ *
+ * This allows the driver to acquire, beforehand, any resources it may need
+ * during the window in which the other driver is accessing the PMIC.
+ *
+ * @nb: notifier_block to register
+ */
+int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb);
+
+/**
+ * iosf_mbi_unregister_pmic_bus_access_notifier - Unregister PMIC bus notifier
+ *
+ * @nb: notifier_block to unregister
+ */
+int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
+
+/**
+ * iosf_mbi_call_pmic_bus_access_notifier_chain - Call PMIC bus notifier chain
+ *
+ * @val: action to pass into listener's notifier_call function
+ * @v: data pointer to pass into listener's notifier_call function
+ */
+int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v);
+
 #else /* CONFIG_IOSF_MBI is not enabled */
 static inline
 bool iosf_mbi_available(void)
@@ -115,6 +180,28 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
 	WARN(1, "IOSF_MBI driver not available");
 	return -EPERM;
 }
+
+static inline void iosf_mbi_punit_acquire(void) {}
+static inline void iosf_mbi_punit_release(void) {}
+
+static inline
+int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline
+int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline
+int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
+{
+	return 0;
+}
+
 #endif /* CONFIG_IOSF_MBI */
 
 #endif /* IOSF_MBI_SYMS_H */
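The new acquire/release pair gives P-Unit/PMIC users a simple bracket around
their accesses. A minimal sketch of a hypothetical consumer (my_chip,
MY_BATTERY_REG and the regmap handle are illustrative assumptions, not part
of this patch):

	#include <asm/iosf_mbi.h>
	#include <linux/regmap.h>

	static int my_read_battery(struct my_chip *chip, u32 *val)
	{
		int ret;

		/* Serialise against P-Unit requests; takes a mutex,
		 * so this may sleep.
		 */
		iosf_mbi_punit_acquire();
		ret = regmap_read(chip->regmap, MY_BATTERY_REG, val);
		iosf_mbi_punit_release();

		return ret;
	}

Because the acquire path takes a mutex, this bracket is only usable from
process context; atomic-context users are expected to go through the
pmic_bus_access notifier instead.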
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index edf2c54bf131..a952ac199741 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -34,6 +34,8 @@
 
 static struct pci_dev *mbi_pdev;
 static DEFINE_SPINLOCK(iosf_mbi_lock);
+static DEFINE_MUTEX(iosf_mbi_punit_mutex);
+static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);
 
 static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
 {
@@ -190,6 +192,53 @@ bool iosf_mbi_available(void)
 }
 EXPORT_SYMBOL(iosf_mbi_available);
 
+void iosf_mbi_punit_acquire(void)
+{
+	mutex_lock(&iosf_mbi_punit_mutex);
+}
+EXPORT_SYMBOL(iosf_mbi_punit_acquire);
+
+void iosf_mbi_punit_release(void)
+{
+	mutex_unlock(&iosf_mbi_punit_mutex);
+}
+EXPORT_SYMBOL(iosf_mbi_punit_release);
+
+int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+	int ret;
+
+	/* Wait for the bus to go inactive before registering */
+	mutex_lock(&iosf_mbi_punit_mutex);
+	ret = blocking_notifier_chain_register(
+				&iosf_mbi_pmic_bus_access_notifier, nb);
+	mutex_unlock(&iosf_mbi_punit_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_register_pmic_bus_access_notifier);
+
+int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+	int ret;
+
+	/* Wait for the bus to go inactive before unregistering */
+	mutex_lock(&iosf_mbi_punit_mutex);
+	ret = blocking_notifier_chain_unregister(
+				&iosf_mbi_pmic_bus_access_notifier, nb);
+	mutex_unlock(&iosf_mbi_punit_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
+
+int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(
+				&iosf_mbi_pmic_bus_access_notifier, val, v);
+}
+EXPORT_SYMBOL(iosf_mbi_call_pmic_bus_access_notifier_chain);
+
 #ifdef CONFIG_IOSF_MBI_DEBUG
 static u32	dbg_mdr;
 static u32	dbg_mcr;
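For drivers that need P-Unit managed resources from atomic context, the
blocking notifier chain is the hook. A hedged sketch of a listener
(my_pmic_bus_access_cb and my_nb are hypothetical names) that prepares its
resources before the PMIC bus becomes busy:

	#include <asm/iosf_mbi.h>
	#include <linux/notifier.h>

	static int my_pmic_bus_access_cb(struct notifier_block *nb,
					 unsigned long action, void *data)
	{
		switch (action) {
		case MBI_PMIC_BUS_ACCESS_BEGIN:
			/* Pre-acquire anything we might need while the
			 * PMIC bus is owned by another driver.
			 */
			break;
		case MBI_PMIC_BUS_ACCESS_END:
			/* Drop those resources again. */
			break;
		}

		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_pmic_bus_access_cb,
	};

	/* typically from probe: */
	iosf_mbi_register_pmic_bus_access_notifier(&my_nb);

Since (un)registration itself takes iosf_mbi_punit_mutex, the chain is
guaranteed quiescent while listeners are added or removed.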
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 1ae0bb91ee60..a5cd5dacf055 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -20,6 +20,7 @@ config DRM_I915
 	select ACPI_VIDEO if ACPI
 	select ACPI_BUTTON if ACPI
 	select SYNC_FILE
+	select IOSF_MBI
 	help
 	  Choose this option if you have a system that has "Intel Graphics
 	  Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b1b580337c7a..2cf04504e494 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -105,8 +105,8 @@ i915-y += dvo_ch7017.o \
 	  intel_dp.o \
 	  intel_dsi.o \
 	  intel_dsi_dcs_backlight.o \
-	  intel_dsi_panel_vbt.o \
 	  intel_dsi_pll.o \
+	  intel_dsi_vbt.o \
 	  intel_dvo.o \
 	  intel_hdmi.o \
 	  intel_i2c.o \
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index e227caf5859e..ce9ac1569cbc 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -160,7 +160,6 @@ struct intel_vgpu {
 	atomic_t running_workload_num;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
-	struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
@@ -231,6 +230,7 @@ struct intel_gvt {
 	struct intel_gvt_gtt gtt;
 	struct intel_gvt_opregion opregion;
 	struct intel_gvt_workload_scheduler scheduler;
+	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
 	struct intel_vgpu_type *types;
 	unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d6b6d0efdd1a..31d2240fdb1f 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
-	struct intel_vgpu *vgpu = container_of(nb,
-			struct intel_vgpu, shadow_ctx_notifier_block);
-	struct drm_i915_gem_request *req =
-		(struct drm_i915_gem_request *)data;
-	struct intel_gvt_workload_scheduler *scheduler =
-		&vgpu->gvt->scheduler;
+	struct drm_i915_gem_request *req = data;
+	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+				shadow_ctx_notifier_block[req->engine->id]);
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
 
@@ -214,7 +212,7 @@ out:
 		workload->status = ret;
 
 	if (!IS_ERR_OR_NULL(rq))
-		i915_add_request_no_flush(rq);
+		i915_add_request(rq);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
 }
@@ -493,15 +491,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 
 	gvt_dbg_core("clean workload scheduler\n");
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (scheduler->thread[i]) {
-			kthread_stop(scheduler->thread[i]);
-			scheduler->thread[i] = NULL;
-		}
+	for_each_engine(engine, gvt->dev_priv, i) {
+		atomic_notifier_chain_unregister(
+					&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
+		kthread_stop(scheduler->thread[i]);
 	}
 }
 
@@ -509,18 +508,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct workload_thread_param *param = NULL;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 	int ret;
-	int i;
 
 	gvt_dbg_core("init workload scheduler\n");
 
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		/* check ring mask at init time */
-		if (!HAS_ENGINE(gvt->dev_priv, i))
-			continue;
-
+	for_each_engine(engine, gvt->dev_priv, i) {
 		init_waitqueue_head(&scheduler->waitq[i]);
 
 		param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -539,6 +535,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 			ret = PTR_ERR(scheduler->thread[i]);
 			goto err;
 		}
+
+		gvt->shadow_ctx_notifier_block[i].notifier_call =
+					shadow_context_status_change;
+		atomic_notifier_chain_register(&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
 	}
 	return 0;
 err:
@@ -550,9 +551,6 @@ err:
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
-			&vgpu->shadow_ctx_notifier_block);
-
 	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
@@ -567,10 +565,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 
 	vgpu->shadow_ctx->engine[RCS].initialised = true;
 
-	vgpu->shadow_ctx_notifier_block.notifier_call =
-		shadow_context_status_change;
-
-	atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
-				       &vgpu->shadow_ctx_notifier_block);
 	return 0;
 }
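The trick that makes the per-engine notifier blocks above work is that
container_of() can name an array element: subtracting the offset of
shadow_ctx_notifier_block[engine->id] recovers the enclosing intel_gvt. A
stripped-down illustration (struct demo is hypothetical; the variable array
index inside offsetof() relies on the GNU C extension the kernel builds
with):

	#include <linux/kernel.h>
	#include <linux/notifier.h>

	struct demo {
		int id;
		struct notifier_block nb[4];
	};

	static struct demo *demo_from_nb(struct notifier_block *nb, int i)
	{
		/* Offset of nb[i] is fixed once i is known, so the usual
		 * container_of() pointer arithmetic applies unchanged.
		 */
		return container_of(nb, struct demo, nb[i]);
	}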
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 21b1cd917d81..7af100f84410 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1279,11 +1279,17 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 	 * space. Parsing should be faster in some cases this way.
 	 */
 	batch_end = cmd + (batch_len / sizeof(*batch_end));
-	while (cmd < batch_end) {
+	do {
 		u32 length;
 
-		if (*cmd == MI_BATCH_BUFFER_END)
+		if (*cmd == MI_BATCH_BUFFER_END) {
+			if (needs_clflush_after) {
+				void *ptr = ptr_mask_bits(shadow_batch_obj->mm.mapping);
+				drm_clflush_virt_range(ptr,
+						       (void *)(cmd + 1) - ptr);
+			}
 			break;
+		}
 
 		desc = find_cmd(engine, *cmd, desc, &default_desc);
 		if (!desc) {
@@ -1323,17 +1329,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 		}
 
 		cmd += length;
-	}
-
-	if (cmd >= batch_end) {
-		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
-		ret = -EINVAL;
-	}
+		if (cmd >= batch_end) {
+			DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+			ret = -EINVAL;
+			break;
+		}
+	} while (1);
 
-	if (ret == 0 && needs_clflush_after)
-		drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
 	i915_gem_object_unpin_map(shadow_batch_obj);
-
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index aa2d726b4349..47e707d83c4d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,7 +27,7 @@
  */
 
 #include <linux/debugfs.h>
-#include <linux/list_sort.h>
+#include <linux/sort.h>
 #include "intel_drv.h"
 
 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -204,13 +204,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
 }
 
-static int obj_rank_by_stolen(void *priv,
-			      struct list_head *A, struct list_head *B)
+static int obj_rank_by_stolen(const void *A, const void *B)
 {
-	struct drm_i915_gem_object *a =
-		container_of(A, struct drm_i915_gem_object, obj_exec_link);
-	struct drm_i915_gem_object *b =
-		container_of(B, struct drm_i915_gem_object, obj_exec_link);
+	const struct drm_i915_gem_object *a =
+		*(const struct drm_i915_gem_object **)A;
+	const struct drm_i915_gem_object *b =
+		*(const struct drm_i915_gem_object **)B;
 
 	if (a->stolen->start < b->stolen->start)
 		return -1;
@@ -223,49 +222,60 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_gem_object **objects;
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
-	LIST_HEAD(stolen);
-	int count, ret;
+	unsigned long total, count, n;
+	int ret;
+
+	total = READ_ONCE(dev_priv->mm.object_count);
+	objects = drm_malloc_ab(total, sizeof(*objects));
+	if (!objects)
+		return -ENOMEM;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
-		return ret;
+		goto out;
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+		if (count == total)
+			break;
+
 		if (obj->stolen == NULL)
 			continue;
 
-		list_add(&obj->obj_exec_link, &stolen);
-
+		objects[count++] = obj;
 		total_obj_size += obj->base.size;
 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
-		count++;
+
 	}
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+		if (count == total)
+			break;
+
 		if (obj->stolen == NULL)
 			continue;
 
-		list_add(&obj->obj_exec_link, &stolen);
-
+		objects[count++] = obj;
 		total_obj_size += obj->base.size;
-		count++;
 	}
-	list_sort(NULL, &stolen, obj_rank_by_stolen);
+
+	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
+
 	seq_puts(m, "Stolen:\n");
-	while (!list_empty(&stolen)) {
-		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
+	for (n = 0; n < count; n++) {
 		seq_puts(m, "   ");
-		describe_obj(m, obj);
+		describe_obj(m, objects[n]);
 		seq_putc(m, '\n');
-		list_del_init(&obj->obj_exec_link);
 	}
-	mutex_unlock(&dev->struct_mutex);
-
-	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
+	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
-	return 0;
+
+	mutex_unlock(&dev->struct_mutex);
+out:
+	drm_free_large(objects);
+	return ret;
 }
 
 struct file_stats {
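Moving from list_sort() to sort() changes the comparator contract: sort()
hands the comparator pointers to the array slots, so an array of object
pointers needs one extra level of indirection, as obj_rank_by_stolen() now
has. A self-contained sketch of the same pattern (struct item and
cmp_by_start are illustrative):

	#include <linux/sort.h>

	struct item {
		unsigned long start;
	};

	static int cmp_by_start(const void *A, const void *B)
	{
		/* A and B point at slots of a struct item *[] array */
		const struct item *a = *(const struct item **)A;
		const struct item *b = *(const struct item **)B;

		if (a->start < b->start)
			return -1;
		if (a->start > b->start)
			return 1;
		return 0;
	}

	/* usage: sort(items, count, sizeof(*items), cmp_by_start, NULL);
	 * where items is the struct item *items[] array to be ordered.
	 */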
@@ -1189,7 +1199,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		}
 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
 			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
-		seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
+		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
+			   dev_priv->rps.pm_intrmsk_mbz);
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "Render p-state ratio: %d\n",
 			   (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1304,16 +1315,18 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	enum intel_engine_id id;
 
 	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
-		seq_printf(m, "Wedged\n");
-	if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
-		seq_printf(m, "Reset in progress\n");
+		seq_puts(m, "Wedged\n");
+	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
+		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
+	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
+		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
 	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
-		seq_printf(m, "Waiter holding struct mutex\n");
+		seq_puts(m, "Waiter holding struct mutex\n");
 	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
-		seq_printf(m, "struct_mutex blocked for reset\n");
+		seq_puts(m, "struct_mutex blocked for reset\n");
 
 	if (!i915.enable_hangcheck) {
-		seq_printf(m, "Hangcheck disabled\n");
+		seq_puts(m, "Hangcheck disabled\n");
 		return 0;
 	}
 
@@ -1393,14 +1406,10 @@ static int ironlake_drpc_info(struct seq_file *m)
 	u32 rgvmodectl, rstdbyctl;
 	u16 crstandvid;
 
-	intel_runtime_pm_get(dev_priv);
-
 	rgvmodectl = I915_READ(MEMMODECTL);
 	rstdbyctl = I915_READ(RSTDBYCTL);
 	crstandvid = I915_READ16(CRSTANDVID);
 
-	intel_runtime_pm_put(dev_priv);
-
 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
 	seq_printf(m, "Boost freq: %d\n",
 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
@@ -1464,19 +1473,26 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
 	return 0;
 }
 
+static void print_rc6_res(struct seq_file *m,
+			  const char *title,
+			  const i915_reg_t reg)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+
+	seq_printf(m, "%s %u (%llu us)\n",
+		   title, I915_READ(reg),
+		   intel_rc6_residency_us(dev_priv, reg));
+}
+
 static int vlv_drpc_info(struct seq_file *m)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u32 rpmodectl1, rcctl1, pw_status;
 
-	intel_runtime_pm_get(dev_priv);
-
 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
 
-	intel_runtime_pm_put(dev_priv);
-
 	seq_printf(m, "Video Turbo Mode: %s\n",
 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
 	seq_printf(m, "Turbo enabled: %s\n",
@@ -1494,10 +1510,8 @@ static int vlv_drpc_info(struct seq_file *m)
 	seq_printf(m, "Media Power Well: %s\n",
 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
 
-	seq_printf(m, "Render RC6 residency since boot: %u\n",
-		   I915_READ(VLV_GT_RENDER_RC6));
-	seq_printf(m, "Media RC6 residency since boot: %u\n",
-		   I915_READ(VLV_GT_MEDIA_RC6));
+	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
+	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
 
 	return i915_forcewake_domains(m, NULL);
 }
@@ -1505,21 +1519,12 @@ static int vlv_drpc_info(struct seq_file *m)
 static int gen6_drpc_info(struct seq_file *m)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
 	unsigned forcewake_count;
-	int count = 0, ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	spin_lock_irq(&dev_priv->uncore.lock);
-	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	int count = 0;
 
+	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
 	if (forcewake_count) {
 		seq_puts(m, "RC information inaccurate because somebody "
 			    "holds a forcewake reference \n");
@@ -1539,13 +1544,11 @@ static int gen6_drpc_info(struct seq_file *m)
 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
 	}
-	mutex_unlock(&dev->struct_mutex);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	intel_runtime_pm_put(dev_priv);
-
 	seq_printf(m, "Video Turbo Mode: %s\n",
 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
 	seq_printf(m, "HW control enabled: %s\n",
@@ -1601,14 +1604,11 @@ static int gen6_drpc_info(struct seq_file *m)
 	}
 
 	/* Not exactly sure what this is */
-	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
-		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
-	seq_printf(m, "RC6 residency since boot: %u\n",
-		   I915_READ(GEN6_GT_GFX_RC6));
-	seq_printf(m, "RC6+ residency since boot: %u\n",
-		   I915_READ(GEN6_GT_GFX_RC6p));
-	seq_printf(m, "RC6++ residency since boot: %u\n",
-		   I915_READ(GEN6_GT_GFX_RC6pp));
+	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
+		      GEN6_GT_GFX_RC6_LOCKED);
+	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
+	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
+	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
 
 	seq_printf(m, "RC6   voltage: %dmV\n",
 		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
@@ -1622,13 +1622,20 @@ static int gen6_drpc_info(struct seq_file *m)
 static int i915_drpc_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	int err;
+
+	intel_runtime_pm_get(dev_priv);
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		return vlv_drpc_info(m);
+		err = vlv_drpc_info(m);
 	else if (INTEL_GEN(dev_priv) >= 6)
-		return gen6_drpc_info(m);
+		err = gen6_drpc_info(m);
 	else
-		return ironlake_drpc_info(m);
+		err = ironlake_drpc_info(m);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return err;
 }
 
 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
@@ -1749,7 +1756,9 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	intel_runtime_pm_get(dev_priv);
 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
-	if (HAS_PCH_SPLIT(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 9)
+		/* no global SR status; inspect per-plane WM */;
+	else if (HAS_PCH_SPLIT(dev_priv))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
@@ -2709,12 +2718,14 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	struct intel_dp *intel_dp = NULL;
 	int ret;
 	u8 crc[6];
 
 	drm_modeset_lock_all(dev);
-	for_each_intel_connector(dev, connector) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
 		struct drm_crtc *crtc;
 
 		if (!connector->base.state->best_encoder)
@@ -2740,6 +2751,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 	}
 	ret = -ENODEV;
 out:
+	drm_connector_list_iter_end(&conn_iter);
 	drm_modeset_unlock_all(dev);
 	return ret;
 }
@@ -3176,9 +3188,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 
 	intel_runtime_pm_get(dev_priv);
-	drm_modeset_lock_all(dev);
 	seq_printf(m, "CRTC info\n");
 	seq_printf(m, "---------\n");
 	for_each_intel_crtc(dev, crtc) {
@@ -3186,6 +3198,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 		struct intel_crtc_state *pipe_config;
 		int x, y;
 
+		drm_modeset_lock(&crtc->base.mutex, NULL);
 		pipe_config = to_intel_crtc_state(crtc->base.state);
 
 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
@@ -3210,15 +3223,19 @@ static int i915_display_info(struct seq_file *m, void *unused)
 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
 			   yesno(!crtc->cpu_fifo_underrun_disabled),
 			   yesno(!crtc->pch_fifo_underrun_disabled));
+		drm_modeset_unlock(&crtc->base.mutex);
 	}
 
 	seq_printf(m, "\n");
 	seq_printf(m, "Connector info\n");
 	seq_printf(m, "--------------\n");
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	mutex_lock(&dev->mode_config.mutex);
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter)
 		intel_connector_info(m, connector);
-	}
-	drm_modeset_unlock_all(dev);
+	drm_connector_list_iter_end(&conn_iter);
+	mutex_unlock(&dev->mode_config.mutex);
+
 	intel_runtime_pm_put(dev_priv);
 
 	return 0;
@@ -3551,13 +3568,16 @@ static void drrs_status_per_crtc(struct seq_file *m,
 	struct i915_drrs *drrs = &dev_priv->drrs;
 	int vrefresh = 0;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 
-	drm_for_each_connector(connector, dev) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		if (connector->state->crtc != &intel_crtc->base)
 			continue;
 
 		seq_printf(m, "%s:\n", connector->name);
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
 		seq_puts(m, "\tVBT: DRRS_type: Static");
@@ -3643,9 +3663,10 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 	struct intel_encoder *intel_encoder;
 	struct intel_digital_port *intel_dig_port;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 
-	drm_modeset_lock_all(dev);
-	drm_for_each_connector(connector, dev) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
 
@@ -3661,7 +3682,8 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 			   port_name(intel_dig_port->port));
 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
 	}
-	drm_modeset_unlock_all(dev);
+	drm_connector_list_iter_end(&conn_iter);
+
 	return 0;
 }
 
@@ -3673,14 +3695,12 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 	int status = 0;
 	struct drm_device *dev;
 	struct drm_connector *connector;
-	struct list_head *connector_list;
+	struct drm_connector_list_iter conn_iter;
 	struct intel_dp *intel_dp;
 	int val = 0;
 
 	dev = ((struct seq_file *)file->private_data)->private;
 
-	connector_list = &dev->mode_config.connector_list;
-
 	if (len == 0)
 		return 0;
 
@@ -3696,7 +3716,8 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 	input_buffer[len] = '\0';
 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
 
-	list_for_each_entry(connector, connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -3706,7 +3727,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 			intel_dp = enc_to_intel_dp(connector->encoder);
 			status = kstrtoint(input_buffer, 10, &val);
 			if (status < 0)
-				goto out;
+				break;
 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
 			/* To prevent erroneous activation of the compliance
 			 * testing code, only accept an actual value of 1 here
@@ -3717,6 +3738,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 				intel_dp->compliance.test_active = 0;
 		}
 	}
+	drm_connector_list_iter_end(&conn_iter);
 out:
 	kfree(input_buffer);
 	if (status < 0)
@@ -3730,10 +3752,11 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 {
 	struct drm_device *dev = m->private;
 	struct drm_connector *connector;
-	struct list_head *connector_list = &dev->mode_config.connector_list;
+	struct drm_connector_list_iter conn_iter;
 	struct intel_dp *intel_dp;
 
-	list_for_each_entry(connector, connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -3748,6 +3771,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 		} else
 			seq_puts(m, "0");
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	return 0;
 }
@@ -3774,10 +3798,11 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 {
 	struct drm_device *dev = m->private;
 	struct drm_connector *connector;
-	struct list_head *connector_list = &dev->mode_config.connector_list;
+	struct drm_connector_list_iter conn_iter;
 	struct intel_dp *intel_dp;
 
-	list_for_each_entry(connector, connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -3801,6 +3826,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 		} else
 			seq_puts(m, "0");
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	return 0;
 }
@@ -3825,10 +3851,11 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 {
 	struct drm_device *dev = m->private;
 	struct drm_connector *connector;
-	struct list_head *connector_list = &dev->mode_config.connector_list;
+	struct drm_connector_list_iter conn_iter;
 	struct intel_dp *intel_dp;
 
-	list_for_each_entry(connector, connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -3840,6 +3867,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 		} else
 			seq_puts(m, "0");
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	return 0;
 }
@@ -4111,12 +4139,16 @@ i915_wedged_set(void *data, u64 val)
 	 * while it is writing to 'i915_wedged'
 	 */
 
-	if (i915_reset_in_progress(&dev_priv->gpu_error))
+	if (i915_reset_backoff(&dev_priv->gpu_error))
 		return -EAGAIN;
 
 	i915_handle_error(dev_priv, val,
 			  "Manually setting wedged to %llu", val);
 
+	wait_on_bit(&dev_priv->gpu_error.flags,
+		    I915_RESET_HANDOFF,
+		    TASK_UNINTERRUPTIBLE);
+
 	return 0;
 }
 
@@ -4125,6 +4157,41 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
 			"%llu\n");
 
 static int
+fault_irq_set(struct drm_i915_private *i915,
+	      unsigned long *irq,
+	      unsigned long val)
+{
+	int err;
+
+	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
+	if (err)
+		return err;
+
+	err = i915_gem_wait_for_idle(i915,
+				     I915_WAIT_LOCKED |
+				     I915_WAIT_INTERRUPTIBLE);
+	if (err)
+		goto err_unlock;
+
+	/* Retire to kick idle work */
+	i915_gem_retire_requests(i915);
+	GEM_BUG_ON(i915->gt.active_requests);
+
+	*irq = val;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	/* Flush idle worker to disarm irq */
+	while (flush_delayed_work(&i915->gt.idle_work))
+		;
+
+	return 0;
+
+err_unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+}
+
+static int
 i915_ring_missed_irq_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
@@ -4136,18 +4203,9 @@ i915_ring_missed_irq_get(void *data, u64 *val)
 static int
 i915_ring_missed_irq_set(void *data, u64 val)
 {
-	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
-	int ret;
+	struct drm_i915_private *i915 = data;
 
-	/* Lock against concurrent debugfs callers */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	dev_priv->gpu_error.missed_irq_rings = val;
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
+	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
@@ -4167,13 +4225,12 @@ i915_ring_test_irq_get(void *data, u64 *val)
 static int
 i915_ring_test_irq_set(void *data, u64 val)
 {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
 
-	val &= INTEL_INFO(dev_priv)->ring_mask;
+	val &= INTEL_INFO(i915)->ring_mask;
 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-	dev_priv->gpu_error.test_irq_rings = val;
 
-	return 0;
+	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
@@ -4185,11 +4242,13 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
 #define DROP_RETIRE 0x4
 #define DROP_ACTIVE 0x8
 #define DROP_FREED 0x10
+#define DROP_SHRINK_ALL 0x20
 #define DROP_ALL (DROP_UNBOUND	| \
 		  DROP_BOUND	| \
 		  DROP_RETIRE	| \
 		  DROP_ACTIVE	| \
-		  DROP_FREED)
+		  DROP_FREED	| \
+		  DROP_SHRINK_ALL)
 static int
 i915_drop_caches_get(void *data, u64 *val)
 {
@@ -4224,12 +4283,17 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & (DROP_RETIRE | DROP_ACTIVE))
 		i915_gem_retire_requests(dev_priv);
 
+	lockdep_set_current_reclaim_state(GFP_KERNEL);
 	if (val & DROP_BOUND)
 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
 
 	if (val & DROP_UNBOUND)
 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
 
+	if (val & DROP_SHRINK_ALL)
+		i915_gem_shrink_all(dev_priv);
+	lockdep_clear_current_reclaim_state();
+
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 704dbcf63866..03d9e45694c9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -567,9 +567,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
-	ret = intel_bios_init(dev_priv);
-	if (ret)
-		DRM_INFO("failed to find VBIOS tables\n");
+	intel_bios_init(dev_priv);
 
 	/* If we have > 1 VGA cards, then we need to arbitrate access
 	 * to the common VGA resources.
@@ -607,8 +605,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_irq;
 
-	intel_huc_init(dev_priv);
-	intel_guc_init(dev_priv);
+	intel_uc_init_fw(dev_priv);
 
 	ret = i915_gem_init(dev_priv);
 	if (ret)
@@ -827,7 +824,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	spin_lock_init(&dev_priv->mmio_flip_lock);
-	spin_lock_init(&dev_priv->wm.dsparb_lock);
 	mutex_init(&dev_priv->sb_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 	mutex_init(&dev_priv->av_mutex);
@@ -992,6 +988,8 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 
 	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
 	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+
+	intel_uc_sanitize_options(dev_priv);
 }
 
 /**
@@ -1423,17 +1421,14 @@ static void i915_driver_lastclose(struct drm_device *dev)
 	vga_switcheroo_process_delayed_switch();
 }
 
-static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file);
 	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
-}
-
-static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	kfree(file_priv);
 }
@@ -1512,7 +1507,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
 	intel_opregion_notify_adapter(dev_priv, opregion_target_state);
 
-	intel_uncore_forcewake_reset(dev_priv, false);
+	intel_uncore_suspend(dev_priv);
 	intel_opregion_unregister(dev_priv);
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1757,7 +1752,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
 			  ret);
 
-	intel_uncore_early_sanitize(dev_priv, true);
+	intel_uncore_resume_early(dev_priv);
 
 	if (IS_GEN9_LP(dev_priv)) {
 		if (!dev_priv->suspended_to_idle)
@@ -1820,12 +1815,15 @@ void i915_reset(struct drm_i915_private *dev_priv)
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
 
-	if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
+	if (!test_bit(I915_RESET_HANDOFF, &error->flags))
 		return;
 
 	/* Clear any previous failed attempts at recovery. Time to try again. */
-	__clear_bit(I915_WEDGED, &error->flags);
+	if (!i915_gem_unset_wedged(dev_priv))
+		goto wakeup;
+
 	error->reset_count++;
 
 	pr_notice("drm/i915: Resetting chip after gpu hang\n");
@@ -1871,15 +1869,18 @@ void i915_reset(struct drm_i915_private *dev_priv)
 
 	i915_queue_hangcheck(dev_priv);
 
-wakeup:
+finish:
 	i915_gem_reset_finish(dev_priv);
 	enable_irq(dev_priv->drm.irq);
-	wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
+
+wakeup:
+	clear_bit(I915_RESET_HANDOFF, &error->flags);
+	wake_up_bit(&error->flags, I915_RESET_HANDOFF);
 	return;
 
 error:
 	i915_gem_set_wedged(dev_priv);
-	goto wakeup;
+	goto finish;
 }
 
 static int i915_pm_suspend(struct device *kdev)
@@ -2402,7 +2403,7 @@ static int intel_runtime_suspend(struct device *kdev)
 		return ret;
 	}
 
-	intel_uncore_forcewake_reset(dev_priv, false);
+	intel_uncore_suspend(dev_priv);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -2638,7 +2639,6 @@ static struct drm_driver driver = {
 	.release = i915_driver_release,
 	.open = i915_driver_open,
 	.lastclose = i915_driver_lastclose,
-	.preclose = i915_driver_preclose,
 	.postclose = i915_driver_postclose,
 	.set_busid = drm_pci_set_busid,
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9ae1e520f48c..a5947a496d0a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,8 +79,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20170306"
-#define DRIVER_TIMESTAMP	1488785683
+#define DRIVER_DATE		"20170320"
+#define DRIVER_TIMESTAMP	1489994464
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -489,10 +489,8 @@ struct i915_hotplug {
 			    &(dev)->mode_config.encoder_list,	\
 			    base.head)
 
-#define for_each_intel_connector(dev, intel_connector)		\
-	list_for_each_entry(intel_connector,			\
-			    &(dev)->mode_config.connector_list,	\
-			    base.head)
+#define for_each_intel_connector_iter(intel_connector, iter) \
+	while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
@@ -764,6 +762,7 @@ struct intel_uncore {
 	const struct intel_forcewake_range *fw_domains_table;
 	unsigned int fw_domains_table_entries;
 
+	struct notifier_block pmic_bus_access_nb;
 	struct intel_uncore_funcs funcs;
 
 	unsigned fifo_count;
@@ -1324,7 +1323,7 @@ struct vlv_s0ix_state {
 };
 
 struct intel_rps_ei {
-	u32 cz_clock;
+	ktime_t ktime;
 	u32 render_c0;
 	u32 media_c0;
 };
@@ -1339,7 +1338,7 @@ struct intel_gen6_power_mgmt {
 	u32 pm_iir;
 
 	/* PM interrupt bits that should never be masked */
-	u32 pm_intr_keep;
+	u32 pm_intrmsk_mbz;
 
 	/* Frequencies are stored in potentially platform dependent multiples.
 	 * In other words, *_freq needs to be multiplied by X to be interesting.
@@ -1378,7 +1377,7 @@ struct intel_gen6_power_mgmt {
 	unsigned boosts;
 
 	/* manual wa residency calculations */
-	struct intel_rps_ei up_ei, down_ei;
+	struct intel_rps_ei ei;
 
 	/*
 	 * Protects RPS/RC6 register access and PCU communication.
@@ -1596,8 +1595,33 @@ struct i915_gpu_error {
 	 */
 	unsigned long reset_count;
 
+	/**
+	 * flags: Control various stages of the GPU reset
+	 *
+	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
+	 * other users acquiring the struct_mutex. To do this we set the
+	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
+	 * and then check for that bit before acquiring the struct_mutex (in
+	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
+	 * secondary role in preventing two concurrent global reset attempts.
+	 *
+	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
+	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
+	 * but it may be held by some long running waiter (that we cannot
+	 * interrupt without causing trouble). Once we are ready to do the GPU
+	 * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
+	 * they already hold the struct_mutex and want to participate they can
+	 * inspect the bit and do the reset directly, otherwise the worker
+	 * waits for the struct_mutex.
+	 *
+	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
+	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
+	 * i915_gem_request_alloc(), this bit is checked and the sequence
+	 * aborted (with -EIO reported to userspace) if set.
+	 */
 	unsigned long flags;
-#define I915_RESET_IN_PROGRESS	0
+#define I915_RESET_BACKOFF	0
+#define I915_RESET_HANDOFF	1
 #define I915_WEDGED		(BITS_PER_LONG - 1)
 
 	/**
@@ -2376,9 +2400,6 @@ struct drm_i915_private {
 	} sagv_status;
 
 	struct {
-		/* protects DSPARB registers on pre-g4x/vlv/chv */
-		spinlock_t dsparb_lock;
-
 		/*
 		 * Raw watermark latency values:
 		 * in 0.1us units for WM0,
@@ -2545,6 +2566,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 	return container_of(guc, struct drm_i915_private, guc);
 }
 
+static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
+{
+	return container_of(huc, struct drm_i915_private, huc);
+}
+
 /* Simple iterator over all initialised engines */
 #define for_each_engine(engine__, dev_priv__, id__) \
 	for ((id__) = 0; \
@@ -3057,14 +3083,12 @@ int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
 extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
-					bool restore_forcewake);
 extern void intel_uncore_init(struct drm_i915_private *dev_priv);
 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
 extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
-extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
-					 bool restore);
+extern void intel_uncore_suspend(struct drm_i915_private *dev_priv);
+extern void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 				enum forcewake_domains domains);
@@ -3356,9 +3380,9 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 				    unsigned int *needs_clflush);
 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 				     unsigned int *needs_clflush);
-#define CLFLUSH_BEFORE 0x1
-#define CLFLUSH_AFTER 0x2
-#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+#define CLFLUSH_BEFORE	BIT(0)
+#define CLFLUSH_AFTER	BIT(1)
+#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
 
 static inline void
 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
@@ -3388,9 +3412,14 @@ i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 
-static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+static inline bool i915_reset_backoff(struct i915_gpu_error *error)
+{
+	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
+}
+
+static inline bool i915_reset_handoff(struct i915_gpu_error *error)
 {
-	return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
+	return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
@@ -3398,9 +3427,9 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 	return unlikely(test_bit(I915_WEDGED, &error->flags));
 }
 
-static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
 {
-	return i915_reset_in_progress(error) | i915_terminally_wedged(error);
+	return i915_reset_backoff(error) | i915_terminally_wedged(error);
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
@@ -3412,6 +3441,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
 void i915_gem_reset(struct drm_i915_private *dev_priv);
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
+bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3717,7 +3747,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
 
 /* intel_bios.c */
-int intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
@@ -3880,6 +3910,8 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder);
 
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
+u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+			   const i915_reg_t reg);
 
 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
@@ -4087,7 +4119,6 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
 	if (engine->irq_seqno_barrier &&
 	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
-		unsigned long flags;
 
 		/* The ordering of irq_posted versus applying the barrier
 		 * is crucial. The clearing of the current irq_posted must
@@ -4109,7 +4140,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
 		 * the seqno before we believe it coherent since they see
 		 * irq_posted == false but we are still running).
 		 */
-		spin_lock_irqsave(&b->irq_lock, flags);
+		spin_lock_irq(&b->irq_lock);
 		if (b->irq_wait && b->irq_wait->tsk != current)
 			/* Note that if the bottom-half is changed as we
 			 * are sending the wake-up, the new bottom-half will
@@ -4118,7 +4149,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
 			 * ourself.
 			 */
 			wake_up_process(b->irq_wait->tsk);
-		spin_unlock_irqrestore(&b->irq_lock, flags);
+		spin_unlock_irq(&b->irq_lock);
 
 		if (__i915_gem_request_completed(req, seqno))
 			return true;
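The replacement for for_each_intel_connector() must be bracketed by the DRM
connector-list iterator calls, as the converted hunks in i915_debugfs.c
above all do. The expected shape, in sketch form:

	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* connector is valid here; the iterator holds its own
		 * reference, so mode_config.mutex need not be held for
		 * the whole walk
		 */
	}
	drm_connector_list_iter_end(&conn_iter);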
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7c20601fe1de..58e1db77d70e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -103,16 +103,13 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 
 	might_sleep();
 
-	if (!i915_reset_in_progress(error))
-		return 0;
-
 	/*
 	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
 	 * userspace. If it takes that long something really bad is going on and
 	 * we should simply try to bail out and fail as gracefully as possible.
 	 */
 	ret = wait_event_interruptible_timeout(error->reset_queue,
-					       !i915_reset_in_progress(error),
+					       !i915_reset_backoff(error),
 					       I915_RESET_TIMEOUT);
 	if (ret == 0) {
 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
@@ -462,11 +459,16 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 
 	dma_fence_put(excl);
 
+	/* Opportunistically prune the fences iff we know they have *all* been
+	 * signaled and that the reservation object has not been changed (i.e.
+	 * no new fences have been added).
+	 */
 	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
-		reservation_object_lock(resv, NULL);
-		if (!__read_seqcount_retry(&resv->seq, seq))
-			reservation_object_add_excl_fence(resv, NULL);
-		reservation_object_unlock(resv);
+		if (reservation_object_trylock(resv)) {
+			if (!__read_seqcount_retry(&resv->seq, seq))
+				reservation_object_add_excl_fence(resv, NULL);
+			reservation_object_unlock(resv);
+		}
 	}
 
 	return timeout;
@@ -783,6 +785,15 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	if (i915_gem_object_is_coherent(obj) ||
+	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		ret = i915_gem_object_set_to_cpu_domain(obj, false);
+		if (ret)
+			goto err_unpin;
+		else
+			goto out;
+	}
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're not in the cpu read domain, set ourself into the gtt
@@ -791,16 +802,9 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	 * anyway again before the next pread happens.
 	 */
 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
-		*needs_clflush = !i915_gem_object_is_coherent(obj);
-
-	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
-		ret = i915_gem_object_set_to_cpu_domain(obj, false);
-		if (ret)
-			goto err_unpin;
-
-		*needs_clflush = 0;
-	}
+		*needs_clflush = CLFLUSH_BEFORE;
 
+out:
 	/* return with the pages pinned */
 	return 0;
 
@@ -833,6 +837,15 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	if (i915_gem_object_is_coherent(obj) ||
+	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		ret = i915_gem_object_set_to_cpu_domain(obj, true);
+		if (ret)
+			goto err_unpin;
+		else
+			goto out;
+	}
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're not in the cpu write domain, set ourself into the
@@ -841,25 +854,15 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	 * right away and we therefore have to clflush anyway.
 	 */
 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
-		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;
+		*needs_clflush |= CLFLUSH_AFTER;
 
 	/* Same trick applies to invalidate partially written cachelines read
 	 * before writing.
 	 */
 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
-		*needs_clflush |= !i915_gem_object_is_coherent(obj);
-
-	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
-		ret = i915_gem_object_set_to_cpu_domain(obj, true);
-		if (ret)
-			goto err_unpin;
-
-		*needs_clflush = 0;
-	}
-
-	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
-		obj->cache_dirty = true;
+		*needs_clflush |= CLFLUSH_BEFORE;
 
+out:
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	obj->mm.dirty = true;
 	/* return with the pages pinned */
@@ -1452,6 +1455,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+	ret = -ENODEV;
+	if (obj->ops->pwrite)
+		ret = obj->ops->pwrite(obj, args);
+	if (ret != -ENODEV)
+		goto err;
+
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
@@ -2130,6 +2139,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	 */
 	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
 	obj->mm.madv = __I915_MADV_PURGED;
+	obj->mm.pages = ERR_PTR(-EFAULT);
 }
 
 /* Try to discard unwanted pages */
@@ -2229,7 +2239,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
 	__i915_gem_object_reset_page_iter(obj);
 
-	obj->ops->put_pages(obj, pages);
+	if (!IS_ERR(pages))
+		obj->ops->put_pages(obj, pages);
+
 unlock:
 	mutex_unlock(&obj->mm.lock);
 }
@@ -2449,7 +2461,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(!obj->mm.pages)) {
+	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 		err = ____i915_gem_object_get_pages(obj);
 		if (err)
 			goto unlock;
@@ -2527,7 +2539,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 	pinned = true;
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(!obj->mm.pages)) {
+		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 			ret = ____i915_gem_object_get_pages(obj);
 			if (ret)
 				goto err_unlock;
@@ -2575,6 +2587,75 @@ err_unlock:
 	goto out_unlock;
 }
 
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+			   const struct drm_i915_gem_pwrite *arg)
+{
+	struct address_space *mapping = obj->base.filp->f_mapping;
+	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+	u64 remain, offset;
+	unsigned int pg;
+
+	/* Before we instantiate/pin the backing store for our use, we
+	 * can prepopulate the shmemfs filp efficiently using a write into
+	 * the pagecache. We avoid the penalty of instantiating all the
+	 * pages, important if the user is just writing to a few and never
+	 * uses the object on the GPU, and using a direct write into shmemfs
+	 * allows it to avoid the cost of retrieving a page (either swapin
+	 * or clearing-before-use) before it is overwritten.
+	 */
+	if (READ_ONCE(obj->mm.pages))
+		return -ENODEV;
+
+	/* Before the pages are instantiated the object is treated as being
+	 * in the CPU domain. The pages will be clflushed as required before
+	 * use, and we can freely write into the pages directly. If userspace
+	 * races pwrite with any other operation, corruption will ensue -
+	 * that is userspace's prerogative!
+	 */
+
+	remain = arg->size;
+	offset = arg->offset;
+	pg = offset_in_page(offset);
+
+	do {
+		unsigned int len, unwritten;
+		struct page *page;
+		void *data, *vaddr;
+		int err;
+
+		len = PAGE_SIZE - pg;
+		if (len > remain)
+			len = remain;
+
+		err = pagecache_write_begin(obj->base.filp, mapping,
+					    offset, len, 0,
+					    &page, &data);
+		if (err < 0)
+			return err;
+
+		vaddr = kmap(page);
+		unwritten = copy_from_user(vaddr + pg, user_data, len);
+		kunmap(page);
+
+		err = pagecache_write_end(obj->base.filp, mapping,
+					  offset, len, len - unwritten,
+					  page, data);
+		if (err < 0)
+			return err;
+
+		if (unwritten)
+			return -EFAULT;
+
+		remain -= len;
+		user_data += len;
+		offset += len;
+		pg = 0;
+	} while (remain);
+
+	return 0;
+}
+
 static bool ban_context(const struct i915_gem_context *ctx)
 {
 	return (i915_gem_context_is_bannable(ctx) &&
@@ -2916,6 +2997,65 @@ void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 }
 
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+{
+	struct i915_gem_timeline *tl;
+	int i;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
+		return true;
+
+	/* Before unwedging, make sure that all pending operations
+	 * are flushed and errored out - we may have requests waiting upon
+	 * third party fences. We marked all inflight requests as EIO, and
+	 * every execbuf since then has returned EIO; for consistency we want
+	 * all the currently pending requests to also be marked as EIO, which
+	 * is done inside our nop_submit_request - and so we must wait.
+	 *
+	 * No more can be submitted until we reset the wedged bit.
+	 */
+	list_for_each_entry(tl, &i915->gt.timelines, link) {
+		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+			struct drm_i915_gem_request *rq;
+
+			rq = i915_gem_active_peek(&tl->engine[i].last_request,
+						  &i915->drm.struct_mutex);
+			if (!rq)
+				continue;
+
+			/* We can't use our normal waiter as we want to
+			 * avoid recursively trying to handle the current
+			 * reset. The basic dma_fence_default_wait() installs
+			 * a callback for dma_fence_signal(), which is
+			 * triggered by our nop handler (indirectly, the
+			 * callback enables the signaler thread which is
+	 * woken by the nop_submit_request() advancing the seqno;
+	 * when the seqno passes the fence, the signaler then
+	 * signals the fence, waking us up).
+			 */
+			if (dma_fence_default_wait(&rq->fence, true,
+						   MAX_SCHEDULE_TIMEOUT) < 0)
+				return false;
+		}
+	}
+
+	/* Undo nop_submit_request. We prevent all new i915 requests from
+	 * being queued (by disallowing execbuf whilst wedged) so having
+	 * waited for all active requests above, we know the system is idle
+	 * and do not have to worry about a thread being inside
+	 * engine->submit_request() as we swap over. So unlike installing
+	 * the nop_submit_request on reset, we can do this from normal
+	 * context and do not require stop_machine().
+	 */
+	intel_engines_reset_default_submission(i915);
+
+	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
+	clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+
+	return true;
+}
+
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
@@ -3991,8 +4131,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE,
+
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
+
+	.pwrite = i915_gem_object_pwrite_gtt,
 };
 
 struct drm_i915_gem_object *
@@ -4454,7 +4597,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	intel_mocs_init_l3cc_table(dev_priv);
 
 	/* We can't enable contexts until all firmware is loaded */
-	ret = intel_guc_setup(dev_priv);
+	ret = intel_uc_init_hw(dev_priv);
 	if (ret)
 		goto out;
 
@@ -4810,38 +4953,49 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 			         const void *data, size_t size)
 {
 	struct drm_i915_gem_object *obj;
-	struct sg_table *sg;
-	size_t bytes;
-	int ret;
+	struct file *file;
+	size_t offset;
+	int err;
 
 	obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
 	if (IS_ERR(obj))
 		return obj;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, true);
-	if (ret)
-		goto fail;
+	GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto fail;
+	file = obj->base.filp;
+	offset = 0;
+	do {
+		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
+		struct page *page;
+		void *pgdata, *vaddr;
 
-	sg = obj->mm.pages;
-	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-	obj->mm.dirty = true; /* Backing store is now out of date */
-	i915_gem_object_unpin_pages(obj);
+		err = pagecache_write_begin(file, file->f_mapping,
+					    offset, len, 0,
+					    &page, &pgdata);
+		if (err < 0)
+			goto fail;
 
-	if (WARN_ON(bytes != size)) {
-		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
-		ret = -EFAULT;
-		goto fail;
-	}
+		vaddr = kmap(page);
+		memcpy(vaddr, data, len);
+		kunmap(page);
+
+		err = pagecache_write_end(file, file->f_mapping,
+					  offset, len, len,
+					  page, pgdata);
+		if (err < 0)
+			goto fail;
+
+		size -= len;
+		data += len;
+		offset += len;
+	} while (size);
 
 	return obj;
 
 fail:
 	i915_gem_object_put(obj);
-	return ERR_PTR(ret);
+	return ERR_PTR(err);
 }
 
 struct scatterlist *
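
Both new copy loops (i915_gem_object_pwrite_gtt() and the rewritten i915_gem_object_create_from_data()) lean on the generic buffered-write protocol from <linux/fs.h>: pagecache_write_begin() returns a locked pagecache page covering [pos, pos+len), the caller fills it through a kmap, and pagecache_write_end() commits the copied bytes. One iteration, reduced to a standalone helper (short-copy handling elided):

/* Sketch: write one chunk (at most a page) into a shmemfs-backed file. */
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/string.h>

static int write_one_chunk(struct file *file, loff_t pos,
			   const void *src, unsigned int len)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	void *fsdata;
	void *vaddr;
	int err;

	err = pagecache_write_begin(file, mapping, pos, len, 0,
				    &page, &fsdata);
	if (err < 0)
		return err;

	vaddr = kmap(page);
	memcpy(vaddr, src, len);	/* fill the locked page */
	kunmap(page);

	/* commits 'len' bytes and marks the page dirty/uptodate */
	err = pagecache_write_end(file, mapping, pos, len, len,
				  page, fsdata);
	return err < 0 ? err : 0;
}
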
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 99ceae7855f8..41aa598c4f3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -96,8 +96,7 @@ struct drm_i915_gem_object *
 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 			size_t size)
 {
-	struct drm_i915_gem_object *obj = NULL;
-	struct drm_i915_gem_object *tmp;
+	struct drm_i915_gem_object *obj;
 	struct list_head *list;
 	int n, ret;
 
@@ -112,31 +111,29 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 		n = ARRAY_SIZE(pool->cache_list) - 1;
 	list = &pool->cache_list[n];
 
-	list_for_each_entry(tmp, list, batch_pool_link) {
+	list_for_each_entry(obj, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
-		if (i915_gem_object_is_active(tmp))
-			break;
+		if (i915_gem_object_is_active(obj)) {
+			if (!reservation_object_test_signaled_rcu(obj->resv,
+								  true))
+				break;
 
-		GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
-								 true));
+			i915_gem_retire_requests(pool->engine->i915);
+			GEM_BUG_ON(i915_gem_object_is_active(obj));
+		}
 
-		if (tmp->base.size >= size) {
-			/* Clear the set of shared fences early */
-			reservation_object_lock(tmp->resv, NULL);
-			reservation_object_add_excl_fence(tmp->resv, NULL);
-			reservation_object_unlock(tmp->resv);
+		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
+								 true));
 
-			obj = tmp;
-			break;
-		}
+		if (obj->base.size >= size)
+			goto found;
 	}
 
-	if (obj == NULL) {
-		obj = i915_gem_object_create_internal(pool->engine->i915, size);
-		if (IS_ERR(obj))
-			return obj;
-	}
+	obj = i915_gem_object_create_internal(pool->engine->i915, size);
+	if (IS_ERR(obj))
+		return obj;
 
+found:
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index baceca14f5e0..486051ed681d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -318,7 +318,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->ring_size = 4 * PAGE_SIZE;
 	ctx->desc_template =
 		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
-	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
 	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
 	 * present or not in use we still need a small bias as ring wraparound
@@ -934,7 +933,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 		}
 
 		ret = i915_switch_context(req);
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 81268c9770a6..4af2ab94558b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -158,9 +158,6 @@ struct i915_gem_context {
 	/** desc_template: invariant fields for the HW context descriptor */
 	u32 desc_template;
 
-	/** status_notifier: list of callbacks for context-switch changes */
-	struct atomic_notifier_head status_notifier;
-
 	/** guilty_count: How many times this context has caused a GPU hang. */
 	unsigned int guilty_count;
 	/**
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a0de5734f7d0..2da3a94fc9f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -299,12 +299,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		 * those as well to make room for our guard pages.
 		 */
 		if (check_color) {
-			if (vma->node.start + vma->node.size == node->start) {
-				if (vma->node.color == node->color)
+			if (node->start + node->size == target->start) {
+				if (node->color == target->color)
 					continue;
 			}
-			if (vma->node.start == node->start + node->size) {
-				if (vma->node.color == node->color)
+			if (node->start == target->start + target->size) {
+				if (node->color == target->color)
 					continue;
 			}
 		}
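
The corrected test compares the scanned node against the eviction target, as intended: two nodes that abut and carry the same cache colour need no guard page between them, so the scan may skip them. The predicate in isolation, as a sketch over plain drm_mm nodes:

/* True if no guard page is needed between 'node' and 'target'. */
#include <drm/drm_mm.h>

static bool abuts_with_same_colour(const struct drm_mm_node *node,
				   const struct drm_mm_node *target)
{
	if (node->color != target->color)
		return false;

	return node->start + node->size == target->start ||	/* before */
	       target->start + target->size == node->start;	/* after */
}
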
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index fadbe8f4c745..5fe2cd8c8f28 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -248,7 +248,14 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 		list_move(&fence->link, &fence->i915->mm.fence_list);
 	}
 
-	fence_write(fence, vma);
+	/* We only need to update the register itself if the device is awake.
+	 * If the device is currently powered down, we will defer the write
+	 * to the runtime resume, see i915_gem_restore_fences().
+	 */
+	if (intel_runtime_pm_get_if_in_use(fence->i915)) {
+		fence_write(fence, vma);
+		intel_runtime_pm_put(fence->i915);
+	}
 
 	if (vma) {
 		if (fence->vma != vma) {
@@ -278,8 +285,6 @@ i915_vma_put_fence(struct i915_vma *vma)
 {
 	struct drm_i915_fence_reg *fence = vma->fence;
 
-	assert_rpm_wakelock_held(vma->vm->i915);
-
 	if (!fence)
 		return 0;
 
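fence_update() now follows a common runtime-pm pattern for registers that are redundantly restored on resume: take a wakeref only if the device is already awake, otherwise skip the MMIO write and let the resume path (here i915_gem_restore_fences()) replay the cached state. Schematically, with write_hw() as a hypothetical stand-in for the real register write:

/* Conditional write-through; the runtime-resume hook replays 'state'. */
static void update_cached_state(struct drm_i915_private *dev_priv, u32 state)
{
	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
		write_hw(dev_priv, state);	/* device awake: write now */
		intel_runtime_pm_put(dev_priv);
	}
	/* else: deferred until runtime resume restores the register */
}
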
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 33b0dc4782a9..174cf923c236 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -56,6 +56,9 @@ struct drm_i915_gem_object_ops {
 	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
 	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
+	int (*pwrite)(struct drm_i915_gem_object *,
+		      const struct drm_i915_gem_pwrite *);
+
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
 };
@@ -271,12 +274,6 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 }
 
 static inline bool
-i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
-{
-	return kref_read(&obj->base.refcount) == 0;
-}
-
-static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
 	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 1e1d9f2072cd..0e8d1010cecb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -1012,7 +1012,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 
 static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
 {
-	if (likely(!i915_reset_in_progress(&request->i915->gpu_error)))
+	if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
 		return false;
 
 	__set_current_state(TASK_RUNNING);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 5018e55922f0..a211c53c813f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -267,8 +267,6 @@ int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
 
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \
-	__i915_add_request(req, true)
-#define i915_add_request_no_flush(req) \
 	__i915_add_request(req, false)
 
 void __i915_gem_request_submit(struct drm_i915_gem_request *request);
@@ -348,6 +346,9 @@ static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
 	u32 seqno;
 
 	seqno = i915_gem_request_global_seqno(request);
+	if (!seqno)
+		return 0;
+
 	return (__i915_gem_request_started(request, seqno) &&
 		__i915_spin_request(request, seqno, state, timeout_us));
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 006a8b908f77..2978acdd995e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -266,7 +266,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 				I915_SHRINK_ACTIVE);
 	intel_runtime_pm_put(dev_priv);
 
-	rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+	synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
 
 	return freed;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 22b46398831e..58ccf8b8ca1c 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -66,13 +66,18 @@ static void cancel_userptr(struct work_struct *work)
 {
 	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
 	struct drm_i915_gem_object *obj = mo->obj;
-	struct drm_device *dev = obj->base.dev;
+	struct work_struct *active;
+
+	/* Cancel any active worker and force us to re-evaluate gup */
+	mutex_lock(&obj->mm.lock);
+	active = fetch_and_zero(&obj->userptr.work);
+	mutex_unlock(&obj->mm.lock);
+	if (active)
+		goto out;
 
 	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
 
-	mutex_lock(&dev->struct_mutex);
-	/* Cancel any active worker and force us to re-evaluate gup */
-	obj->userptr.work = NULL;
+	mutex_lock(&obj->base.dev->struct_mutex);
 
 	/* We are inside a kthread context and can't be interrupted */
 	if (i915_gem_object_unbind(obj) == 0)
@@ -83,8 +88,10 @@ static void cancel_userptr(struct work_struct *work)
 		  atomic_read(&obj->mm.pages_pin_count),
 		  obj->pin_display);
 
+	mutex_unlock(&obj->base.dev->struct_mutex);
+
+out:
 	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void add_object(struct i915_mmu_object *mo)
@@ -145,7 +152,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		del_object(mo);
 	spin_unlock(&mn->lock);
 
-	flush_workqueue(mn->wq);
+	if (!list_empty(&cancelled))
+		flush_workqueue(mn->wq);
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -541,6 +549,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		}
 
 		obj->userptr.work = ERR_CAST(pages);
+		if (IS_ERR(pages))
+			__i915_gem_userptr_set_active(obj, false);
 	}
 	mutex_unlock(&obj->mm.lock);
 
@@ -553,8 +563,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 }
 
 static struct sg_table *
-__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
-				      bool *active)
+__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 {
 	struct get_pages_work *work;
 
@@ -591,7 +600,6 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
 	schedule_work(&work->work);
 
-	*active = true;
 	return ERR_PTR(-EAGAIN);
 }
 
@@ -599,10 +607,11 @@ static struct sg_table *
 i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
 	const int num_pages = obj->base.size >> PAGE_SHIFT;
+	struct mm_struct *mm = obj->userptr.mm->mm;
 	struct page **pvec;
 	struct sg_table *pages;
-	int pinned, ret;
 	bool active;
+	int pinned;
 
 	/* If userspace should engineer that these pages are replaced in
 	 * the vma between us binding this page into the GTT and completion
@@ -629,37 +638,39 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 			return ERR_PTR(-EAGAIN);
 	}
 
-	/* Let the mmu-notifier know that we have begun and need cancellation */
-	ret = __i915_gem_userptr_set_active(obj, true);
-	if (ret)
-		return ERR_PTR(ret);
-
 	pvec = NULL;
 	pinned = 0;
-	if (obj->userptr.mm->mm == current->mm) {
-		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
-				      GFP_TEMPORARY);
-		if (pvec == NULL) {
-			__i915_gem_userptr_set_active(obj, false);
-			return ERR_PTR(-ENOMEM);
-		}
 
-		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
-					       !obj->userptr.read_only, pvec);
+	if (mm == current->mm) {
+		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+				      GFP_TEMPORARY |
+				      __GFP_NORETRY |
+				      __GFP_NOWARN);
+		if (pvec) /* defer to worker if malloc fails */
+			pinned = __get_user_pages_fast(obj->userptr.ptr,
+						       num_pages,
+						       !obj->userptr.read_only,
+						       pvec);
 	}
 
 	active = false;
-	if (pinned < 0)
-		pages = ERR_PTR(pinned), pinned = 0;
-	else if (pinned < num_pages)
-		pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
-	else
+	if (pinned < 0) {
+		pages = ERR_PTR(pinned);
+		pinned = 0;
+	} else if (pinned < num_pages) {
+		pages = __i915_gem_userptr_get_pages_schedule(obj);
+		active = pages == ERR_PTR(-EAGAIN);
+	} else {
 		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
-	if (IS_ERR(pages)) {
-		__i915_gem_userptr_set_active(obj, active);
-		release_pages(pvec, pinned, 0);
+		active = !IS_ERR(pages);
 	}
+	if (active)
+		__i915_gem_userptr_set_active(obj, true);
+
+	if (IS_ERR(pages))
+		release_pages(pvec, pinned, 0);
 	drm_free_large(pvec);
+
 	return pages;
 }
 
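After this restructuring the control flow of i915_gem_userptr_get_pages() is: try a cheap, non-blocking pin only when called from the owning mm; on any shortfall fall back to the worker (which reports -EAGAIN); and mark the object active for the mmu-notifier exactly when pages are, or soon will be, in flight. Condensed, with schedule_worker() and assemble_sg_table() as hypothetical stand-ins:

/* Decision skeleton (sketch, not the full function). */
pvec = NULL;
pinned = 0;
if (mm == current->mm) {
	pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
			      GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
	if (pvec)	/* allocation failure just means "use the worker" */
		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
}

if (pinned < 0)
	pages = ERR_PTR(pinned);		/* hard gup error */
else if (pinned < num_pages)
	pages = schedule_worker(obj);		/* ERR_PTR(-EAGAIN) on queue */
else
	pages = assemble_sg_table(obj, pvec, num_pages);

/* the notifier must know about the worker or about live pages */
active = pages == ERR_PTR(-EAGAIN) || !IS_ERR(pages);
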
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index beb38e30d0e9..832ac9e45801 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -25,6 +25,8 @@
 #include "i915_drv.h"
 #include "intel_uc.h"
 
+#include <trace/events/dma_fence.h>
+
 /**
  * DOC: GuC-based command submission
  *
@@ -522,8 +524,6 @@ static void __i915_guc_submit(struct drm_i915_gem_request *rq)
 	if (i915_vma_is_map_and_fenceable(rq->ring->vma))
 		POSTING_READ_FW(GUC_STATUS);
 
-	trace_i915_gem_request_in(rq, 0);
-
 	spin_lock_irqsave(&client->wq_lock, flags);
 
 	guc_wq_item_append(client, rq);
@@ -542,10 +542,111 @@ static void __i915_guc_submit(struct drm_i915_gem_request *rq)
 
 static void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
-	i915_gem_request_submit(rq);
+	__i915_gem_request_submit(rq);
 	__i915_guc_submit(rq);
 }
 
+static void nested_enable_signaling(struct drm_i915_gem_request *rq)
+{
+	/* If we use dma_fence_enable_sw_signaling() directly, lockdep
+	 * detects an ordering issue between the fence lockclass and the
+	 * global_timeline. This circular dependency can only occur via 2
+	 * different fences (but same fence lockclass), so we use the nesting
+	 * annotation here to prevent the warning, equivalent to the nesting
+	 * inside i915_gem_request_submit() for when we also enable the
+	 * signaler.
+	 */
+
+	if (test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			     &rq->fence.flags))
+		return;
+
+	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
+	trace_dma_fence_enable_signal(&rq->fence);
+
+	spin_lock_nested(&rq->lock, SINGLE_DEPTH_NESTING);
+	intel_engine_enable_signaling(rq);
+	spin_unlock(&rq->lock);
+}
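
SINGLE_DEPTH_NESTING is the stock lockdep annotation for acquiring a second lock of the same class in a fixed, deadlock-free order; without it lockdep would flag the nested acquisition as a potential self-deadlock. A generic sketch outside this driver, with a hypothetical structure:

/* Nested same-class locking; 'struct node' is hypothetical. */
#include <linux/spinlock.h>

struct node {
	spinlock_t lock;
	struct node *child;
};

static void touch_pair(struct node *parent)
{
	spin_lock(&parent->lock);
	/* same lock class, always parent -> child: annotate the nesting */
	spin_lock_nested(&parent->child->lock, SINGLE_DEPTH_NESTING);
	/* ... operate on both nodes ... */
	spin_unlock(&parent->child->lock);
	spin_unlock(&parent->lock);
}
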
+
+static bool i915_guc_dequeue(struct intel_engine_cs *engine)
+{
+	struct execlist_port *port = engine->execlist_port;
+	struct drm_i915_gem_request *last = port[0].request;
+	unsigned long flags;
+	struct rb_node *rb;
+	bool submit = false;
+
+	/* After execlist_first is updated, the tasklet will be rescheduled.
+	 *
+	 * If we are currently running (inside the tasklet) and a third
+	 * party queues a request and so updates engine->execlist_first under
+	 * the spinlock (which we have elided), it will atomically set the
+	 * TASKLET_SCHED flag causing us to be re-executed and pick up
+	 * the change in state (the update to TASKLET_SCHED incurs a memory
+	 * barrier making this cross-cpu checking safe).
+	 */
+	if (!READ_ONCE(engine->execlist_first))
+		return false;
+
+	spin_lock_irqsave(&engine->timeline->lock, flags);
+	rb = engine->execlist_first;
+	while (rb) {
+		struct drm_i915_gem_request *rq =
+			rb_entry(rb, typeof(*rq), priotree.node);
+
+		if (last && rq->ctx != last->ctx) {
+			if (port != engine->execlist_port)
+				break;
+
+			i915_gem_request_assign(&port->request, last);
+			nested_enable_signaling(last);
+			port++;
+		}
+
+		rb = rb_next(rb);
+		rb_erase(&rq->priotree.node, &engine->execlist_queue);
+		RB_CLEAR_NODE(&rq->priotree.node);
+		rq->priotree.priority = INT_MAX;
+
+		trace_i915_gem_request_in(rq, port - engine->execlist_port);
+		i915_guc_submit(rq);
+		last = rq;
+		submit = true;
+	}
+	if (submit) {
+		i915_gem_request_assign(&port->request, last);
+		nested_enable_signaling(last);
+		engine->execlist_first = rb;
+	}
+	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+	return submit;
+}
+
+static void i915_guc_irq_handler(unsigned long data)
+{
+	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct execlist_port *port = engine->execlist_port;
+	struct drm_i915_gem_request *rq;
+	bool submit;
+
+	do {
+		rq = port[0].request;
+		while (rq && i915_gem_request_completed(rq)) {
+			trace_i915_gem_request_out(rq);
+			i915_gem_request_put(rq);
+			port[0].request = port[1].request;
+			port[1].request = NULL;
+			rq = port[0].request;
+		}
+
+		submit = false;
+		if (!port[1].request)
+			submit = i915_guc_dequeue(engine);
+	} while (submit);
+}
+
 /*
  * Everything below here is concerned with setup & teardown, and is
  * therefore not part of the somewhat time-critical batch-submission
@@ -810,22 +911,21 @@ static void guc_addon_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct i915_vma *vma;
-	struct guc_ads *ads;
-	struct guc_policies *policies;
-	struct guc_mmio_reg_state *reg_state;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	struct page *page;
-	u32 size;
-
 	/* The ads obj includes the struct itself and buffers passed to GuC */
-	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
-			sizeof(struct guc_mmio_reg_state) +
-			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+	struct {
+		struct guc_ads ads;
+		struct guc_policies policies;
+		struct guc_mmio_reg_state reg_state;
+		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+	} __packed *blob;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 base;
 
 	vma = guc->ads_vma;
 	if (!vma) {
-		vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
+		vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
 		if (IS_ERR(vma))
 			return;
 
@@ -833,44 +933,38 @@ static void guc_addon_create(struct intel_guc *guc)
 	}
 
 	page = i915_vma_first_page(vma);
-	ads = kmap(page);
-
-	/*
-	 * The GuC requires a "Golden Context" when it reinitialises
-	 * engines after a reset. Here we use the Render ring default
-	 * context, which must already exist and be pinned in the GGTT,
-	 * so its address won't change after we've told the GuC where
-	 * to find it.
-	 */
-	engine = dev_priv->engine[RCS];
-	ads->golden_context_lrca = engine->status_page.ggtt_offset;
-
-	for_each_engine(engine, dev_priv, id)
-		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
+	blob = kmap(page);
 
 	/* GuC scheduling policies */
-	policies = (void *)ads + sizeof(struct guc_ads);
-	guc_policies_init(policies);
-
-	ads->scheduler_policies =
-		guc_ggtt_offset(vma) + sizeof(struct guc_ads);
+	guc_policies_init(&blob->policies);
 
 	/* MMIO reg state */
-	reg_state = (void *)policies + sizeof(struct guc_policies);
-
 	for_each_engine(engine, dev_priv, id) {
-		reg_state->mmio_white_list[engine->guc_id].mmio_start =
+		blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
 			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
 		/* Nothing to be saved or restored for now. */
-		reg_state->mmio_white_list[engine->guc_id].count = 0;
+		blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
 	}
 
-	ads->reg_state_addr = ads->scheduler_policies +
-			sizeof(struct guc_policies);
+	/*
+	 * The GuC requires a "Golden Context" when it reinitialises
+	 * engines after a reset. Here we use the Render ring default
+	 * context, which must already exist and be pinned in the GGTT,
+	 * so its address won't change after we've told the GuC where
+	 * to find it.
+	 */
+	blob->ads.golden_context_lrca =
+		dev_priv->engine[RCS]->status_page.ggtt_offset;
 
-	ads->reg_state_buffer = ads->reg_state_addr +
-			sizeof(struct guc_mmio_reg_state);
+	for_each_engine(engine, dev_priv, id)
+		blob->ads.eng_state_size[engine->guc_id] =
+			intel_lr_context_size(engine);
+
+	base = guc_ggtt_offset(vma);
+	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
+	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
+	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
 
 	kunmap(page);
 }
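
With the four GuC objects folded into one __packed blob, every GGTT pointer becomes base + offsetof(); the ptr_offset() macro introduced in i915_utils.h further down this patch is exactly offsetof(typeof(*(ptr)), member). The address arithmetic in isolation, with illustrative field sizes:

/* Sketch of layout-driven GGTT pointer setup; sizes are illustrative. */
#include <linux/stddef.h>	/* offsetof() */
#include <linux/types.h>

struct blob {
	u32 ads[8];
	u32 policies[16];
	u8 reg_state_buffer[4096];
} __packed;

static void fill_pointers(struct blob *b, u32 ggtt_base)
{
	/* each field's GGTT address is the blob's base plus its offset */
	u32 policies_addr = ggtt_base + offsetof(struct blob, policies);
	u32 buffer_addr = ggtt_base + offsetof(struct blob, reg_state_buffer);

	b->ads[0] = policies_addr;	/* e.g. scheduler_policies */
	b->ads[1] = buffer_addr;	/* e.g. reg_state_buffer */
}
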
@@ -936,6 +1030,48 @@ static void guc_reset_wq(struct i915_guc_client *client)
 	client->wq_tail = 0;
 }
 
+static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int irqs;
+
+	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
+	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+	for_each_engine(engine, dev_priv, id)
+		I915_WRITE(RING_MODE_GEN7(engine), irqs);
+
+	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
+	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+	/* These three registers have the same bit definitions */
+	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
+	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
+	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+
+	/*
+	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
+	 * (unmasked) PM interrupts to the GuC. All other bits of this
+	 * register *disable* generation of a specific interrupt.
+	 *
+	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
+	 * writing to the PM interrupt mask register, i.e. interrupts
+	 * that must not be disabled.
+	 *
+	 * If the GuC is handling these interrupts, then we must not let
+	 * the PM code disable ANY interrupt that the GuC is expecting.
+	 * So for each ENABLED (0) bit in this register, we must SET the
+	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
+	 * The GuC needs the ARAT expired interrupt unmasked, hence it is
+	 * set in pm_intrmsk_mbz.
+	 *
+	 * Here we CLEAR the REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which
+	 * will result in the register bit being left SET!
+	 */
+	dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+	dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+}
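
pm_intrmsk_mbz reads as "must be zero": the set of GEN6_PMINTRMSK bits a mask write may never set. Presumably every writer clears them unconditionally, roughly as below (compute_disable_mask() is a stand-in for the caller's own logic):

/* Hedged sketch of a PMINTRMSK write honouring the mbz bits. */
u32 mask = compute_disable_mask(dev_priv);	/* 1 = disable interrupt */

mask &= ~dev_priv->rps.pm_intrmsk_mbz;		/* never disable these */
I915_WRITE(GEN6_PMINTRMSK, mask);
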
+
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
@@ -952,12 +1088,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	guc_init_doorbell_hw(guc);
 
 	/* Take over from manual control of ELSP (execlists) */
+	guc_interrupts_capture(dev_priv);
+
 	for_each_engine(engine, dev_priv, id) {
 		const int wqi_size = sizeof(struct guc_wq_item);
 		struct drm_i915_gem_request *rq;
 
-		engine->submit_request = i915_guc_submit;
-		engine->schedule = NULL;
+		/* The tasklet was initialised by execlists, and may be in
+		 * a state of flux (across a reset), so we just want to
+		 * take over the callback without changing any other state
+		 * in the tasklet.
+		 */
+		engine->irq_tasklet.func = i915_guc_irq_handler;
+		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
 		/* Replay the current set of previously submitted requests */
 		spin_lock_irq(&engine->timeline->lock);
@@ -971,15 +1114,41 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int irqs;
+
+	/*
+	 * tell all command streamers NOT to forward interrupts or vblank
+	 * to GuC.
+	 */
+	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
+	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
+	for_each_engine(engine, dev_priv, id)
+		I915_WRITE(RING_MODE_GEN7(engine), irqs);
+
+	/* route all GT interrupts to the host */
+	I915_WRITE(GUC_BCS_RCS_IER, 0);
+	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
+	I915_WRITE(GUC_WD_VECS_IER, 0);
+
+	dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+	dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+}
+
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 
+	guc_interrupts_release(dev_priv);
+
 	if (!guc->execbuf_client)
 		return;
 
 	/* Revert back to manual ELSP submission */
-	intel_execlists_enable_submission(dev_priv);
+	intel_engines_reset_default_submission(dev_priv);
 }
 
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df95733cf112..cb20c9408b12 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -389,11 +389,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
-{
-	return (mask & ~dev_priv->rps.pm_intr_keep);
-}
-
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
@@ -728,6 +723,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
 								pipe);
 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
+	unsigned long irqflags;
 
 	htotal = mode->crtc_htotal;
 	hsync_start = mode->crtc_hsync_start;
@@ -744,17 +740,21 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 	high_frame = PIPEFRAME(pipe);
 	low_frame = PIPEFRAMEPIXEL(pipe);
 
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
 	/*
 	 * High & low register fields aren't synchronized, so make sure
 	 * we get a low value that's stable across two reads of the high
 	 * register.
 	 */
 	do {
-		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-		low   = I915_READ(low_frame);
-		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
+		low   = I915_READ_FW(low_frame);
+		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
 	pixel = low & PIPE_PIXEL_MASK;
 	low >>= PIPE_FRAME_LOW_SHIFT;
@@ -812,8 +812,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 
 		for (i = 0; i < 100; i++) {
 			udelay(1);
-			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
-				DSL_LINEMASK_GEN3;
+			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 			if (temp != position) {
 				position = temp;
 				break;
@@ -1057,7 +1056,9 @@ static void notify_ring(struct intel_engine_cs *engine)
 		 * and many waiters.
 		 */
 		if (i915_seqno_passed(intel_engine_get_seqno(engine),
-				      wait->seqno))
+				      wait->seqno) &&
+		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			      &wait->request->fence.flags))
 			rq = i915_gem_request_get(wait->request);
 
 		wake_up_process(wait->tsk);
@@ -1077,73 +1078,51 @@ static void notify_ring(struct intel_engine_cs *engine)
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
 			struct intel_rps_ei *ei)
 {
-	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+	ei->ktime = ktime_get_raw();
 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
 }
 
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
-			 const struct intel_rps_ei *old,
-			 const struct intel_rps_ei *now,
-			 int threshold)
-{
-	u64 time, c0;
-	unsigned int mul = 100;
-
-	if (old->cz_clock == 0)
-		return false;
-
-	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
-		mul <<= 8;
-
-	time = now->cz_clock - old->cz_clock;
-	time *= threshold * dev_priv->czclk_freq;
-
-	/* Workload can be split between render + media, e.g. SwapBuffers
-	 * being blitted in X after being rendered in mesa. To account for
-	 * this we need to combine both engines into our activity counter.
-	 */
-	c0 = now->render_c0 - old->render_c0;
-	c0 += now->media_c0 - old->media_c0;
-	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
-	return c0 >= time;
-}
-
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
-	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
 	struct intel_rps_ei now;
 	u32 events = 0;
 
-	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
 		return 0;
 
 	vlv_c0_read(dev_priv, &now);
-	if (now.cz_clock == 0)
-		return 0;
 
-	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
-		if (!vlv_c0_above(dev_priv,
-				  &dev_priv->rps.down_ei, &now,
-				  dev_priv->rps.down_threshold))
-			events |= GEN6_PM_RP_DOWN_THRESHOLD;
-		dev_priv->rps.down_ei = now;
-	}
+	if (prev->ktime) {
+		u64 time, c0;
+		u32 render, media;
 
-	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
-		if (vlv_c0_above(dev_priv,
-				 &dev_priv->rps.up_ei, &now,
-				 dev_priv->rps.up_threshold))
-			events |= GEN6_PM_RP_UP_THRESHOLD;
-		dev_priv->rps.up_ei = now;
+		time = ktime_us_delta(now.ktime, prev->ktime);
+		time *= dev_priv->czclk_freq;
+
+		/* Workload can be split between render + media,
+		 * e.g. SwapBuffers being blitted in X after being rendered in
+		 * mesa. To account for this we need to combine both engines
+		 * into our activity counter.
+		 */
+		render = now.render_c0 - prev->render_c0;
+		media = now.media_c0 - prev->media_c0;
+		c0 = max(render, media);
+		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
+		if (c0 > time * dev_priv->rps.up_threshold)
+			events = GEN6_PM_RP_UP_THRESHOLD;
+		else if (c0 < time * dev_priv->rps.down_threshold)
+			events = GEN6_PM_RP_DOWN_THRESHOLD;
 	}
 
+	dev_priv->rps.ei = now;
 	return events;
 }
 
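The unit juggling in vlv_wa_c0_ei() is easier to audit standalone. time is microseconds multiplied by czclk_freq in kHz, i.e. elapsed CZ clock cycles scaled by 1000; each C0 counter tick covers 2^8 clocks in high-range mode, so multiplying the tick delta by 1000 * 100 << 8 puts busyness in the same milli-cycle scale, times 100 for the percentage threshold. A plain-C rendering of the comparison:

/* Self-contained check: was the GPU busier than threshold_pct percent? */
#include <stdbool.h>
#include <stdint.h>

static bool above_threshold(uint64_t elapsed_us, uint32_t czclk_khz,
			    uint32_t c0_ticks, unsigned int threshold_pct)
{
	/* elapsed wall time, in milli-cycles of the CZ clock */
	uint64_t time = elapsed_us * czclk_khz;
	/* busy time in the same scale, pre-multiplied by 100 (percent) */
	uint64_t busy = (uint64_t)c0_ticks * (1000 * 100 << 8);

	return busy > time * threshold_pct;
}
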
@@ -1163,30 +1142,21 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, struct drm_i915_private, rps.work);
-	bool client_boost;
+	bool client_boost = false;
 	int new_delay, adj, min, max;
-	u32 pm_iir;
+	u32 pm_iir = 0;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	/* Speed up work cancelation during disabling rps interrupts. */
-	if (!dev_priv->rps.interrupts_enabled) {
-		spin_unlock_irq(&dev_priv->irq_lock);
-		return;
+	if (dev_priv->rps.interrupts_enabled) {
+		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
+		client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
 	}
-
-	pm_iir = dev_priv->rps.pm_iir;
-	dev_priv->rps.pm_iir = 0;
-	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
-	gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
-	client_boost = dev_priv->rps.client_boost;
-	dev_priv->rps.client_boost = false;
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	/* Make sure we didn't queue anything we're not going to process. */
 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
-
 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
-		return;
+		goto out;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
@@ -1243,6 +1213,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
+
+out:
+	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->rps.interrupts_enabled)
+		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 
@@ -1378,13 +1355,20 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 static __always_inline void
 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
-	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
-		notify_ring(engine);
+	bool tasklet = false;
 
 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
 		set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-		tasklet_hi_schedule(&engine->irq_tasklet);
+		tasklet = true;
+	}
+
+	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
+		notify_ring(engine);
+		tasklet |= i915.enable_guc_submission;
 	}
+
+	if (tasklet)
+		tasklet_hi_schedule(&engine->irq_tasklet);
 }
 
 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
@@ -2647,22 +2631,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	return ret;
 }
 
-static void i915_error_wake_up(struct drm_i915_private *dev_priv)
-{
-	/*
-	 * Notify all waiters for GPU completion events that reset state has
-	 * been changed, and that they need to restart their wait after
-	 * checking for potential errors (and bail out to drop locks if there is
-	 * a gpu reset pending so that i915_error_work_func can acquire them).
-	 */
-
-	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-	wake_up_all(&dev_priv->gpu_error.wait_queue);
-
-	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
-	wake_up_all(&dev_priv->pending_flip_queue);
-}
-
 /**
  * i915_reset_and_wakeup - do process context error handling work
  * @dev_priv: i915 device private
@@ -2682,16 +2650,11 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 	DRM_DEBUG_DRIVER("resetting chip\n");
 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 
-	/*
-	 * In most cases it's guaranteed that we get here with an RPM
-	 * reference held, for example because there is a pending GPU
-	 * request that won't finish until the reset is done. This
-	 * isn't the case at least when we get here by doing a
-	 * simulated reset via debugfs, so get an RPM reference.
-	 */
-	intel_runtime_pm_get(dev_priv);
 	intel_prepare_reset(dev_priv);
 
+	set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
+	wake_up_all(&dev_priv->gpu_error.wait_queue);
+
 	do {
 		/*
 		 * All state reset _must_ be completed before we update the
@@ -2706,12 +2669,11 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 
 		/* We need to wait for anyone holding the lock to wakeup */
 	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
-				     I915_RESET_IN_PROGRESS,
+				     I915_RESET_HANDOFF,
 				     TASK_UNINTERRUPTIBLE,
 				     HZ));
 
 	intel_finish_reset(dev_priv);
-	intel_runtime_pm_put(dev_priv);
 
 	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
 		kobject_uevent_env(kobj,
@@ -2721,6 +2683,7 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 	 * Note: The wake_up also serves as a memory barrier so that
 	 * waiters see the updated value of the dev_priv->gpu_error.
 	 */
+	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
 	wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
@@ -2798,31 +2761,29 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 	va_end(args);
 
+	/*
+	 * In most cases it's guaranteed that we get here with an RPM
+	 * reference held, for example because there is a pending GPU
+	 * request that won't finish until the reset is done. This
+	 * isn't the case at least when we get here by doing a
+	 * simulated reset via debugfs, so get an RPM reference.
+	 */
+	intel_runtime_pm_get(dev_priv);
+
 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
 	i915_clear_error_registers(dev_priv);
 
 	if (!engine_mask)
-		return;
+		goto out;
 
-	if (test_and_set_bit(I915_RESET_IN_PROGRESS,
+	if (test_and_set_bit(I915_RESET_BACKOFF,
 			     &dev_priv->gpu_error.flags))
-		return;
-
-	/*
-	 * Wakeup waiting processes so that the reset function
-	 * i915_reset_and_wakeup doesn't deadlock trying to grab
-	 * various locks. By bumping the reset counter first, the woken
-	 * processes will see a reset in progress and back off,
-	 * releasing their locks and then wait for the reset completion.
-	 * We must do this for _all_ gpu waiters that might hold locks
-	 * that the reset work needs to acquire.
-	 *
-	 * Note: The wake_up also provides a memory barrier to ensure that the
-	 * waiters see the updated value of the reset flags.
-	 */
-	i915_error_wake_up(dev_priv);
+		goto out;
 
 	i915_reset_and_wakeup(dev_priv);
+
+out:
+	intel_runtime_pm_put(dev_priv);
 }
 
 /* Called from drm generic code, passed 'crtc' which
@@ -4283,11 +4244,11 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	/* Let's track the enabled rps events */
 	if (IS_VALLEYVIEW(dev_priv))
 		/* WaGsvRC0ResidencyMethod:vlv */
-		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
 	else
 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
-	dev_priv->rps.pm_intr_keep = 0;
+	dev_priv->rps.pm_intrmsk_mbz = 0;
 
 	/*
 	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
@@ -4296,10 +4257,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	 * TODO: verify if this can be reproduced on VLV,CHV.
 	 */
 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
-		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
+		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
 	if (INTEL_INFO(dev_priv)->gen >= 8)
-		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
+		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
 	if (IS_GEN2(dev_priv)) {
 		/* Gen2 doesn't have a hardware frame counter */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 2e9645e6555a..b6a7e363d076 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -59,6 +59,8 @@ struct i915_params i915 __read_mostly = {
 	.enable_guc_loading = 0,
 	.enable_guc_submission = 0,
 	.guc_log_level = -1,
+	.guc_firmware_path = NULL,
+	.huc_firmware_path = NULL,
 	.enable_dp_mst = true,
 	.inject_load_failure = 0,
 	.enable_dpcd_backlight = false,
@@ -230,6 +232,14 @@ module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
 MODULE_PARM_DESC(guc_log_level,
 	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
 
+module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400);
+MODULE_PARM_DESC(guc_firmware_path,
+	"GuC firmware path to use instead of the default one");
+
+module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400);
+MODULE_PARM_DESC(huc_firmware_path,
+	"HuC firmware path to use instead of the default one");
+
 module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
 MODULE_PARM_DESC(enable_dp_mst,
 	"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 55d47eea172e..34148cc8637c 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -46,6 +46,8 @@
 	func(int, enable_guc_loading); \
 	func(int, enable_guc_submission); \
 	func(int, guc_log_level); \
+	func(char *, guc_firmware_path); \
+	func(char *, huc_firmware_path); \
 	func(int, use_mmio_flip); \
 	func(int, mmio_debug); \
 	func(int, edp_vswing); \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cc843f96576f..04c8f69fcc62 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1140,8 +1140,6 @@ enum skl_disp_power_wells {
 #define 	VLV_BIAS_CPU_125_SOC_875 (6 << 2)
 #define 	CHV_BIAS_CPU_50_SOC_50 (3 << 2)
 
-#define VLV_CZ_CLOCK_TO_MILLI_SEC		100000
-
 /* vlv2 north clock has */
 #define CCK_FUSE_REG				0x8
 #define  CCK_FUSE_HPLL_FREQ_MASK		0x3
@@ -7453,7 +7451,8 @@ enum {
 #define VLV_RCEDATA				_MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD			_MMIO(0xA0C0)
 #define GEN6_PMINTRMSK				_MMIO(0xA168)
-#define   GEN8_PMINTR_REDIRECT_TO_GUC		  (1<<31)
+#define   GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC	(1<<31)
+#define   ARAT_EXPIRED_INTRMSK			(1<<9)
 #define GEN8_MISC_CTRL0				_MMIO(0xA180)
 #define VLV_PWRDWNUPCTL				_MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS		_MMIO(0xA0C4)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index af0ac9f261fd..f3fdfda5e558 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -42,32 +42,8 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
 static u32 calc_residency(struct drm_i915_private *dev_priv,
 			  i915_reg_t reg)
 {
-	u64 raw_time; /* 32b value may overflow during fixed point math */
-	u64 units = 128ULL, div = 100000ULL;
-	u32 ret;
-
-	if (!intel_enable_rc6())
-		return 0;
-
-	intel_runtime_pm_get(dev_priv);
-
-	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		units = 1;
-		div = dev_priv->czclk_freq;
-
-		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
-			units <<= 8;
-	} else if (IS_GEN9_LP(dev_priv)) {
-		units = 1;
-		div = 1200;		/* 833.33ns */
-	}
-
-	raw_time = I915_READ(reg) * units;
-	ret = DIV_ROUND_UP_ULL(raw_time, div);
-
-	intel_runtime_pm_put(dev_priv);
-	return ret;
+	return DIV_ROUND_CLOSEST_ULL(intel_rc6_residency_us(dev_priv, reg),
+				     1000);
 }
 
 static ssize_t
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5503f5ab1e98..66404c5aee82 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -590,7 +590,7 @@ TRACE_EVENT(i915_gem_request_queue,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->ctx = req->ctx->hw_id;
+			   __entry->ctx = req->fence.context;
 			   __entry->seqno = req->fence.seqno;
 			   __entry->flags = flags;
 			   ),
@@ -637,8 +637,8 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
-			   __entry->ctx = req->ctx->hw_id;
 			   __entry->ring = req->engine->id;
+			   __entry->ctx = req->fence.context;
 			   __entry->seqno = req->fence.seqno;
 			   __entry->global = req->global_seqno;
 			   ),
@@ -681,7 +681,7 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
 		    TP_fast_assign(
 			           __entry->dev = req->i915->drm.primary->index;
 			           __entry->ring = req->engine->id;
-			           __entry->ctx = req->ctx->hw_id;
+			           __entry->ctx = req->fence.context;
 			           __entry->seqno = req->fence.seqno;
 			           __entry->global_seqno = req->global_seqno;
 			           __entry->port = port;
@@ -776,7 +776,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->ctx = req->ctx->hw_id;
+			   __entry->ctx = req->fence.context;
 			   __entry->seqno = req->fence.seqno;
 			   __entry->global = req->global_seqno;
 			   __entry->flags = flags;
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index b8ba0f2f92af..94a3a3299910 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -66,6 +66,8 @@
 #define ptr_pack_bits(ptr, bits)					\
 	((typeof(ptr))((unsigned long)(ptr) | (bits)))
 
+#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
+
 #define fetch_and_zero(ptr) ({						\
 	typeof(*ptr) __T = *(ptr);					\
 	*(ptr) = (typeof(*ptr))0;					\
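
Both helpers are consumed earlier in this patch: ptr_offset() by guc_addon_create() and fetch_and_zero() wherever a pointer is claimed and cleared under a lock (the userptr and breadcrumbs hunks). Note that fetch_and_zero() is not atomic on its own; callers rely on an external lock, as in this shape taken from the cancel_userptr() hunk:

/* Claim-and-clear under a lock (sketch). */
struct work_struct *active;

mutex_lock(&obj->mm.lock);
active = fetch_and_zero(&obj->userptr.work);	/* read, then zero */
mutex_unlock(&obj->mm.lock);

if (active) {
	/* a worker was in flight and is now disowned */
}
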
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 14014068dfcf..4ab8a973b61f 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -218,13 +218,9 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 			goto err;
 	}
 
-	/*
-	 * No need to partition out the last physical page,
-	 * because it is reserved to the guard page.
-	 */
-	if (unmappable_end < ggtt_end - PAGE_SIZE) {
+	if (unmappable_end < ggtt_end) {
 		ret = vgt_balloon_space(ggtt, &bl_info.space[3],
-					unmappable_end, ggtt_end - PAGE_SIZE);
+					unmappable_end, ggtt_end);
 		if (ret)
 			goto err;
 	}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e144f033f4b5..639d45c1dd2e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1341,6 +1341,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	return;
 }
 
+/* Common defaults which may be overridden by VBT. */
 static void
 init_vbt_defaults(struct drm_i915_private *dev_priv)
 {
@@ -1377,6 +1378,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 			&dev_priv->vbt.ddi_port_info[port];
 
 		info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
+	}
+}
+
+/* Defaults to initialize only if there is no VBT. */
+static void
+init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
+{
+	enum port port;
+
+	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+		struct ddi_vbt_port_info *info =
+			&dev_priv->vbt.ddi_port_info[port];
 
 		info->supports_dvi = (port != PORT_A && port != PORT_E);
 		info->supports_hdmi = info->supports_dvi;
@@ -1462,36 +1475,35 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
  * intel_bios_init - find VBT and initialize settings from the BIOS
  * @dev_priv: i915 device instance
  *
- * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
- * to appropriate values.
- *
- * Returns 0 on success, nonzero on failure.
+ * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
+ * is not found in the ACPI OpRegion, try to find it in the PCI ROM. Also
+ * initialize some defaults if the VBT is not present at all.
  */
-int
-intel_bios_init(struct drm_i915_private *dev_priv)
+void intel_bios_init(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	const struct vbt_header *vbt = dev_priv->opregion.vbt;
 	const struct bdb_header *bdb;
 	u8 __iomem *bios = NULL;
 
-	if (HAS_PCH_NOP(dev_priv))
-		return -ENODEV;
+	if (HAS_PCH_NOP(dev_priv)) {
+		DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
+		return;
+	}
 
 	init_vbt_defaults(dev_priv);
 
+	/* If the OpRegion does not have VBT, look in PCI ROM. */
 	if (!vbt) {
 		size_t size;
 
 		bios = pci_map_rom(pdev, &size);
 		if (!bios)
-			return -1;
+			goto out;
 
 		vbt = find_vbt(bios, size);
-		if (!vbt) {
-			pci_unmap_rom(pdev, bios);
-			return -1;
-		}
+		if (!vbt)
+			goto out;
 
 		DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
 	}
@@ -1516,10 +1528,14 @@ intel_bios_init(struct drm_i915_private *dev_priv)
 	parse_mipi_sequence(dev_priv, bdb);
 	parse_ddi_ports(dev_priv, bdb);
 
+out:
+	if (!vbt) {
+		DRM_INFO("Failed to find VBIOS tables (VBT)\n");
+		init_vbt_missing_defaults(dev_priv);
+	}
+
 	if (bios)
 		pci_unmap_rom(pdev, bios);
-
-	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 2393bb9fe665..ba986edee312 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -47,12 +47,11 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
 unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
-	unsigned long flags;
 	unsigned int result;
 
-	spin_lock_irqsave(&b->irq_lock, flags);
+	spin_lock_irq(&b->irq_lock);
 	result = __intel_breadcrumbs_wakeup(b);
-	spin_unlock_irqrestore(&b->irq_lock, flags);
+	spin_unlock_irq(&b->irq_lock);
 
 	return result;
 }
@@ -86,7 +85,7 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
 		return;
 	}
 
-	/* We keep the hangcheck time alive until we disarm the irq, even
+	/* We keep the hangcheck timer alive until we disarm the irq, even
 	 * if there are no waiters at present.
 	 *
 	 * If the waiter was currently running, assume it hasn't had a chance
@@ -110,20 +109,18 @@ static void intel_breadcrumbs_fake_irq(unsigned long data)
 {
 	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
-	unsigned long flags;
 
-	/*
-	 * The timer persists in case we cannot enable interrupts,
+	/* The timer persists in case we cannot enable interrupts,
 	 * or if we have previously seen seqno/interrupt incoherency
-	 * ("missed interrupt" syndrome). Here the worker will wake up
-	 * every jiffie in order to kick the oldest waiter to do the
-	 * coherent seqno check.
+	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
+	 * Here the worker will wake up every jiffie in order to kick the
+	 * oldest waiter to do the coherent seqno check.
 	 */
 
-	spin_lock_irqsave(&b->irq_lock, flags);
+	spin_lock_irq(&b->irq_lock);
 	if (!__intel_breadcrumbs_wakeup(b))
 		__intel_engine_disarm_breadcrumbs(engine);
-	spin_unlock_irqrestore(&b->irq_lock, flags);
+	spin_unlock_irq(&b->irq_lock);
 	if (!b->irq_armed)
 		return;
 
@@ -168,6 +165,7 @@ void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
 	lockdep_assert_held(&b->irq_lock);
+	GEM_BUG_ON(b->irq_wait);
 
 	if (b->irq_enabled) {
 		irq_disable(engine);
@@ -180,23 +178,31 @@ void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
 void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
-	unsigned long flags;
+	struct intel_wait *wait, *n, *first;
 
 	if (!b->irq_armed)
 		return;
 
-	spin_lock_irqsave(&b->irq_lock, flags);
-
 	/* We only disarm the irq when we are idle (all requests completed),
-	 * so if there remains a sleeping waiter, it missed the request
+	 * so if the bottom-half remains asleep, it missed the request
 	 * completion.
 	 */
-	if (__intel_breadcrumbs_wakeup(b) & ENGINE_WAKEUP_ASLEEP)
-		missed_breadcrumb(engine);
 
+	spin_lock_irq(&b->rb_lock);
+
+	spin_lock(&b->irq_lock);
+	first = fetch_and_zero(&b->irq_wait);
 	__intel_engine_disarm_breadcrumbs(engine);
+	spin_unlock(&b->irq_lock);
+
+	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
+		RB_CLEAR_NODE(&wait->node);
+		if (wake_up_process(wait->tsk) && wait == first)
+			missed_breadcrumb(engine);
+	}
+	b->waiters = RB_ROOT;
 
-	spin_unlock_irqrestore(&b->irq_lock, flags);
+	spin_unlock_irq(&b->rb_lock);
 }
 
 static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -280,9 +286,15 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
 					      struct intel_wait *wait)
 {
 	lockdep_assert_held(&b->rb_lock);
+	GEM_BUG_ON(b->irq_wait == wait);
 
 	/* This request is completed, so remove it from the tree, mark it as
-	 * complete, and *then* wake up the associated task.
+	 * complete, and *then* wake up the associated task. N.B. when the
+	 * task wakes up, it will find the empty rb_node, discern that it
+	 * has already been removed from the tree and skip the serialisation
+	 * of the b->rb_lock and b->irq_lock. This means that the destruction
+	 * of the intel_wait is not serialised with the interrupt handler
+	 * by the waiter - it must instead be serialised by the caller.
 	 */
 	rb_erase(&wait->node, &b->waiters);
 	RB_CLEAR_NODE(&wait->node);
@@ -297,6 +309,7 @@ static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
 
 	spin_lock(&b->irq_lock);
 	GEM_BUG_ON(!b->irq_armed);
+	GEM_BUG_ON(!b->irq_wait);
 	b->irq_wait = to_wait(next);
 	spin_unlock(&b->irq_lock);
 
@@ -372,25 +385,8 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
 	rb_link_node(&wait->node, parent, p);
 	rb_insert_color(&wait->node, &b->waiters);
 
-	if (completed) {
-		struct rb_node *next = rb_next(completed);
-
-		GEM_BUG_ON(!next && !first);
-		if (next && next != &wait->node) {
-			GEM_BUG_ON(first);
-			__intel_breadcrumbs_next(engine, next);
-		}
-
-		do {
-			struct intel_wait *crumb = to_wait(completed);
-			completed = rb_prev(completed);
-			__intel_breadcrumbs_finish(b, crumb);
-		} while (completed);
-	}
-
 	if (first) {
 		spin_lock(&b->irq_lock);
-		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
 		b->irq_wait = wait;
 		/* After assigning ourselves as the new bottom-half, we must
 		 * perform a cursory check to prevent a missed interrupt.
@@ -403,7 +399,28 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
 		__intel_breadcrumbs_enable_irq(b);
 		spin_unlock(&b->irq_lock);
 	}
+
+	if (completed) {
+		/* Advance the bottom-half (b->irq_wait) before we wake up
+		 * the waiters who may scribble over their intel_wait
+		 * just as the interrupt handler is dereferencing it via
+		 * b->irq_wait.
+		 */
+		if (!first) {
+			struct rb_node *next = rb_next(completed);
+			GEM_BUG_ON(next == &wait->node);
+			__intel_breadcrumbs_next(engine, next);
+		}
+
+		do {
+			struct intel_wait *crumb = to_wait(completed);
+			completed = rb_prev(completed);
+			__intel_breadcrumbs_finish(b, crumb);
+		} while (completed);
+	}
+
 	GEM_BUG_ON(!b->irq_wait);
+	GEM_BUG_ON(!b->irq_armed);
 	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
 
 	return first;
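
Moving the completion sweep after the bottom-half handoff closes a use-after-free window; once woken, a waiter may return and release its stack-allocated intel_wait while the interrupt handler is still chasing b->irq_wait. A sketch of the race the new ordering prevents:

	/*
	 * Hazard with the old ordering (timeline, not real code):
	 *
	 *   CPU0: __intel_engine_add_wait()   CPU1: interrupt handler
	 *   wake_up_process(crumb->tsk);
	 *                                     wait = b->irq_wait; // stale
	 *   // crumb's task exits the wait,
	 *   // its intel_wait leaves scope
	 *                                     wake_up_process(wait->tsk); // UAF
	 *
	 * Advancing b->irq_wait before any wakeup closes the window.
	 */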
@@ -505,8 +522,10 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
 	 * the tree by the bottom-half to avoid contention on the spinlock
 	 * by the herd.
 	 */
-	if (RB_EMPTY_NODE(&wait->node))
+	if (RB_EMPTY_NODE(&wait->node)) {
+		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
 		return;
+	}
 
 	spin_lock_irq(&b->rb_lock);
 	__intel_engine_remove_wait(engine, wait);
@@ -643,7 +662,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	/* Note that we may be called from an interrupt handler on another
 	 * device (e.g. nouveau signaling a fence completion causing us
 	 * to submit a request, and so enable signaling). As such,
-	 * we need to make sure that all other users of b->lock protect
+	 * we need to make sure that all other users of b->rb_lock protect
 	 * against interrupts, i.e. use spin_lock_irqsave.
 	 */
 
@@ -822,12 +841,12 @@ bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
 
 	if (b->irq_wait) {
 		wake_up_process(b->irq_wait->tsk);
-		busy |= intel_engine_flag(engine);
+		busy = true;
 	}
 
 	if (rcu_access_pointer(b->first_signal)) {
 		wake_up_process(b->signaler);
-		busy |= intel_engine_flag(engine);
+		busy = true;
 	}
 
 	spin_unlock_irq(&b->rb_lock);
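
fetch_and_zero(), used when detaching b->irq_wait above, is an i915 helper from i915_utils.h that reads a variable and clears it in a single expression; it is not atomic on its own, the callers rely on holding b->irq_lock. Paraphrased definition (an assumption, check i915_utils.h in this tree):

	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})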
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index d643c0c5321b..c2cc33f3d888 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -223,7 +223,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
 	/* FIXME other chipsets? */
 	if (IS_GM45(dev_priv))
 		vco_table = ctg_vco;
-	else if (IS_G4X(dev_priv))
+	else if (IS_G45(dev_priv))
 		vco_table = elk_vco;
 	else if (IS_I965GM(dev_priv))
 		vco_table = cl_vco;
@@ -1470,7 +1470,7 @@ static int intel_max_pixel_rate(struct drm_atomic_state *state)
 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
 	       sizeof(intel_state->min_pixclk));
 
-	for_each_crtc_in_state(state, crtc, cstate, i) {
+	for_each_new_crtc_in_state(state, crtc, cstate, i) {
 		int pixel_rate;
 
 		crtc_state = to_intel_crtc_state(cstate);
@@ -1859,7 +1859,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
 		dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
 	else if (IS_GM45(dev_priv))
 		dev_priv->display.get_cdclk = gm45_get_cdclk;
-	else if (IS_G4X(dev_priv))
+	else if (IS_G45(dev_priv))
 		dev_priv->display.get_cdclk = g33_get_cdclk;
 	else if (IS_I965GM(dev_priv))
 		dev_priv->display.get_cdclk = i965gm_get_cdclk;
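
The iterator conversions here and throughout this series follow the drm core's split of struct drm_atomic_state into explicit old and new per-object state; for_each_crtc_in_state() was ambiguous about which side it handed back. A sketch of the replacement family, using the drm_atomic.h macros this tree already relies on elsewhere in the diff (max_rate and pipe_changes are illustrative locals, not fields from this patch):

	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state, *new_state;
	int i;

	/* State being committed: */
	for_each_new_crtc_in_state(state, crtc, new_state, i)
		max_rate = max(max_rate, new_state->adjusted_mode.crtc_clock);

	/* State being replaced: */
	for_each_old_crtc_in_state(state, crtc, old_state, i)
		;	/* e.g. release resources tied to old_state */

	/* Both sides, for before/after comparisons: */
	for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i)
		if (old_state->active != new_state->active)
			pipe_changes |= drm_crtc_mask(crtc);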
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index b9e5266d933b..306c6b06b330 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -465,14 +465,14 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
 	 *  different values per channel, so this just loads a linear table.
 	 */
 	for (i = 0; i < lut_size; i++) {
-		uint32_t v = (i * ((1 << 16) - 1)) / (lut_size - 1);
+		uint32_t v = (i * (1 << 16)) / (lut_size - 1);
 
 		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
 	}
 
 	/* Clamp values > 1.0. */
 	while (i++ < 35)
-		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16) - 1);
+		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
 }
 
 static void glk_load_luts(struct drm_crtc_state *state)
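
Worked arithmetic for the degamma change: the PRE_CSC_GAMC_DATA value is a fixed-point fraction in which 1 << 16 encodes 1.0, not 0xffff. With lut_size entries indexed 0..lut_size-1, the new formula puts the final entry exactly at full scale, and the clamp entries saturate at 1.0 for the same reason. Assuming a 33-entry GLK degamma table (an assumption for illustration):

	const int lut_size = 33;	/* assumed table size */
	int i = lut_size - 1;		/* last entry */

	int old_v = (i * ((1 << 16) - 1)) / (lut_size - 1);	/* 65535, just under 1.0 */
	int new_v = (i * (1 << 16)) / (lut_size - 1);		/* 65536, exactly 1.0 */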
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 14659c7e2858..36832257cc9b 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,9 +34,8 @@
  * low-power state and comes back to normal.
  */
 
-#define I915_CSR_GLK "i915/glk_dmc_ver1_03.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
-#define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 3)
+#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
+#define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
 
 #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
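
The GLK requirement moves from DMC 1.03 to 1.04, and the MODULE_FIRMWARE() line is dropped so the module metadata no longer advertises a blob that may not have shipped yet (an inference from the hunk, not stated in it). CSR_VERSION() packs major and minor into one integer for comparison against the version in the firmware header; paraphrasing the i915 definitions (an assumption, see i915_drv.h):

	#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
	#define CSR_VERSION_MAJOR(version)	((version) >> 16)
	#define CSR_VERSION_MINOR(version)	((version) & 0xffff)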
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 04676760e6fd..d8214ba8da14 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -821,11 +821,11 @@ static struct intel_encoder *
 intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct intel_encoder *intel_encoder, *ret = NULL;
+	struct intel_encoder *encoder, *ret = NULL;
 	int num_encoders = 0;
 
-	for_each_encoder_on_crtc(dev, &crtc->base, intel_encoder) {
-		ret = intel_encoder;
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+		ret = encoder;
 		num_encoders++;
 	}
 
@@ -837,7 +837,7 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
 	return ret;
 }
 
-struct intel_encoder *
+static struct intel_encoder *
 intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -850,7 +850,7 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
 
 	state = crtc_state->base.state;
 
-	for_each_connector_in_state(state, connector, connector_state, i) {
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
 		if (connector_state->crtc != crtc_state->base.crtc)
 			continue;
 
@@ -1130,12 +1130,12 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
 static bool
 hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 		   struct intel_crtc_state *crtc_state,
-		   struct intel_encoder *intel_encoder)
+		   struct intel_encoder *encoder)
 {
 	struct intel_shared_dpll *pll;
 
 	pll = intel_get_shared_dpll(intel_crtc, crtc_state,
-				    intel_encoder);
+				    encoder);
 	if (!pll)
 		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
 				 pipe_name(intel_crtc->pipe));
@@ -1146,11 +1146,11 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 static bool
 skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 		   struct intel_crtc_state *crtc_state,
-		   struct intel_encoder *intel_encoder)
+		   struct intel_encoder *encoder)
 {
 	struct intel_shared_dpll *pll;
 
-	pll = intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
+	pll = intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
 	if (pll == NULL) {
 		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
 				 pipe_name(intel_crtc->pipe));
@@ -1163,9 +1163,9 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 static bool
 bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
 		   struct intel_crtc_state *crtc_state,
-		   struct intel_encoder *intel_encoder)
+		   struct intel_encoder *encoder)
 {
-	return !!intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
+	return !!intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
 }
 
 /*
@@ -1179,27 +1179,27 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
 			  struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-	struct intel_encoder *intel_encoder =
+	struct intel_encoder *encoder =
 		intel_ddi_get_crtc_new_encoder(crtc_state);
 
 	if (IS_GEN9_BC(dev_priv))
 		return skl_ddi_pll_select(intel_crtc, crtc_state,
-					  intel_encoder);
+					  encoder);
 	else if (IS_GEN9_LP(dev_priv))
 		return bxt_ddi_pll_select(intel_crtc, crtc_state,
-					  intel_encoder);
+					  encoder);
 	else
 		return hsw_ddi_pll_select(intel_crtc, crtc_state,
-					  intel_encoder);
+					  encoder);
 }
 
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-	int type = intel_encoder->type;
+	int type = encoder->type;
 	uint32_t temp;
 
 	if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
@@ -1244,12 +1244,12 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-	enum port port = intel_ddi_get_encoder_port(intel_encoder);
-	int type = intel_encoder->type;
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	int type = encoder->type;
 	uint32_t temp;
 
 	/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
@@ -1321,7 +1321,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 		temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
 	} else {
 		WARN(1, "Invalid encoder type %d for pipe %c\n",
-		     intel_encoder->type, pipe_name(pipe));
+		     encoder->type, pipe_name(pipe));
 	}
 
 	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
@@ -1342,19 +1342,19 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 {
 	struct drm_device *dev = intel_connector->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	struct intel_encoder *encoder = intel_connector->encoder;
 	int type = intel_connector->base.connector_type;
-	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	enum port port = intel_ddi_get_encoder_port(encoder);
 	enum pipe pipe = 0;
 	enum transcoder cpu_transcoder;
 	uint32_t tmp;
 	bool ret;
 
 	if (!intel_display_power_get_if_enabled(dev_priv,
-						intel_encoder->power_domain))
+						encoder->power_domain))
 		return false;
 
-	if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
+	if (!encoder->get_hw_state(encoder, &pipe)) {
 		ret = false;
 		goto out;
 	}
@@ -1393,7 +1393,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	}
 
 out:
-	intel_display_power_put(dev_priv, intel_encoder->power_domain);
+	intel_display_power_put(dev_priv, encoder->power_domain);
 
 	return ret;
 }
@@ -1486,8 +1486,8 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
+	enum port port = intel_ddi_get_encoder_port(encoder);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
 	if (cpu_transcoder != TRANSCODER_EDP)
@@ -1762,14 +1762,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 				   crtc_state, conn_state);
 }
 
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
+static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 				 struct intel_crtc_state *pipe_config,
 				 struct drm_connector_state *conn_state)
 {
-	int type = intel_encoder->type;
+	int type = encoder->type;
 
 	if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
-		intel_ddi_pre_enable_dp(intel_encoder,
+		intel_ddi_pre_enable_dp(encoder,
 					pipe_config->port_clock,
 					pipe_config->lane_count,
 					pipe_config->shared_dpll,
@@ -1777,7 +1777,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
 							    INTEL_OUTPUT_DP_MST));
 	}
 	if (type == INTEL_OUTPUT_HDMI) {
-		intel_ddi_pre_enable_hdmi(intel_encoder,
+		intel_ddi_pre_enable_hdmi(encoder,
 					  pipe_config->has_hdmi_sink,
 					  pipe_config, conn_state,
 					  pipe_config->shared_dpll);
@@ -1836,11 +1836,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
 	}
 }
 
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
 				struct intel_crtc_state *old_crtc_state,
 				struct drm_connector_state *old_conn_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	uint32_t val;
 
 	/*
@@ -1853,7 +1853,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
 	val &= ~FDI_RX_ENABLE;
 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 
-	intel_ddi_post_disable(intel_encoder, old_crtc_state, old_conn_state);
+	intel_ddi_post_disable(encoder, old_crtc_state, old_conn_state);
 
 	val = I915_READ(FDI_RX_MISC(PIPE_A));
 	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
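
Beyond the mechanical intel_encoder to encoder rename, intel_ddi_get_crtc_new_encoder() now resolves the encoder from the connector states in the atomic update rather than from legacy pointers. The lookup pattern, sketched against the variables in the function above, with the invariant these helpers warn about (exactly one encoder per CRTC):

	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct intel_encoder *ret = NULL;
	int i, num_encoders = 0;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc_state->base.crtc)
			continue;
		ret = to_intel_encoder(conn_state->best_encoder);
		num_encoders++;
	}
	WARN(num_encoders != 1, "%d encoders for pipe\n", num_encoders);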
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 9fc6ab783008..7d01dfe7faac 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -197,8 +197,10 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 		IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
 	sseu->has_eu_pg = sseu->eu_per_subslice > 2;
 
-	if (IS_BROXTON(dev_priv)) {
+	if (IS_GEN9_LP(dev_priv)) {
 #define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask & BIT(ss)))
+		info->has_pooled_eu = hweight8(sseu->subslice_mask) == 3;
+
 		/*
 		 * There is a HW issue in 2x6 fused down parts that requires
 		 * Pooled EU to be enabled as a WA. The pool configuration
@@ -206,9 +208,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 		 * doesn't affect if the device has all 3 subslices enabled.
 		 */
 		/* WaEnablePooledEuFor2x6:bxt */
-		info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
-				       (hweight8(sseu->subslice_mask) == 2 &&
-					INTEL_REVID(dev_priv) < BXT_REVID_C0));
+		info->has_pooled_eu |= (hweight8(sseu->subslice_mask) == 2 &&
+					IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST));
 
 		sseu->min_eu_in_pool = 0;
 		if (info->has_pooled_eu) {
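
The IS_BROXTON to IS_GEN9_LP change extends the pooled-EU setup to Geminilake, and the restructuring keeps the same truth table: any 3-subslice GEN9 LP part gets pooled EU, and 2x6 fused-down Broxton before C0 additionally forces it as WaEnablePooledEuFor2x6 (IS_BXT_REVID() is false on non-BXT, so GLK is unaffected by that clause). hweight8() counts set bits, here enabled subslices:

	/* Equivalent predicate (sketch): */
	bool pooled = hweight8(sseu->subslice_mask) == 3;	/* all subslices */

	if (hweight8(sseu->subslice_mask) == 2 &&		/* 2x6 fused down */
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))	/* pre-C0 */
		pooled = true;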
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7369ee31ad91..010e5ddb198a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1990,10 +1990,13 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 	return IS_GEN2(dev_priv) ? 2048 : 4096;
 }
 
-static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
-					   uint64_t fb_modifier, unsigned int cpp)
+static unsigned int
+intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
 {
-	switch (fb_modifier) {
+	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	unsigned int cpp = fb->format->cpp[plane];
+
+	switch (fb->modifier) {
 	case DRM_FORMAT_MOD_NONE:
 		return cpp;
 	case I915_FORMAT_MOD_X_TILED:
@@ -2022,43 +2025,38 @@ static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_pr
 		}
 		break;
 	default:
-		MISSING_CASE(fb_modifier);
+		MISSING_CASE(fb->modifier);
 		return cpp;
 	}
 }
 
-unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
-			       uint64_t fb_modifier, unsigned int cpp)
+static unsigned int
+intel_tile_height(const struct drm_framebuffer *fb, int plane)
 {
-	if (fb_modifier == DRM_FORMAT_MOD_NONE)
+	if (fb->modifier == DRM_FORMAT_MOD_NONE)
 		return 1;
 	else
-		return intel_tile_size(dev_priv) /
-			intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+		return intel_tile_size(to_i915(fb->dev)) /
+			intel_tile_width_bytes(fb, plane);
 }
 
 /* Return the tile dimensions in pixel units */
-static void intel_tile_dims(const struct drm_i915_private *dev_priv,
+static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
 			    unsigned int *tile_width,
-			    unsigned int *tile_height,
-			    uint64_t fb_modifier,
-			    unsigned int cpp)
+			    unsigned int *tile_height)
 {
-	unsigned int tile_width_bytes =
-		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
+	unsigned int cpp = fb->format->cpp[plane];
 
 	*tile_width = tile_width_bytes / cpp;
-	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
+	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
 }
 
 unsigned int
-intel_fb_align_height(struct drm_i915_private *dev_priv,
-		      unsigned int height,
-		      uint32_t pixel_format,
-		      uint64_t fb_modifier)
+intel_fb_align_height(const struct drm_framebuffer *fb,
+		      int plane, unsigned int height)
 {
-	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
-	unsigned int tile_height = intel_tile_height(dev_priv, fb_modifier, cpp);
+	unsigned int tile_height = intel_tile_height(fb, plane);
 
 	return ALIGN(height, tile_height);
 }
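
These helpers now derive tile geometry from the framebuffer itself; the relationships are tile_width_px = tile_width_bytes / cpp and tile_height = tile_size / tile_width_bytes. Worked numbers for an X-tiled XRGB8888 plane, assuming the classic 4 KiB tile with 512-byte rows:

	unsigned int tile_size = 4096;		/* intel_tile_size() */
	unsigned int tile_width_bytes = 512;	/* X-tiled */
	unsigned int cpp = 4;			/* XRGB8888 */

	unsigned int tile_width = tile_width_bytes / cpp;		/* 128 px */
	unsigned int tile_height = tile_size / tile_width_bytes;	/* 8 rows */

	unsigned int aligned = ALIGN(1086, tile_height);		/* 1088 */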
@@ -2099,21 +2097,27 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
 		return 0;
 }
 
-static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
-					 uint64_t fb_modifier)
+static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+					 int plane)
 {
-	switch (fb_modifier) {
+	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+	/* AUX_DIST needs only 4K alignment */
+	if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
+		return 4096;
+
+	switch (fb->modifier) {
 	case DRM_FORMAT_MOD_NONE:
 		return intel_linear_alignment(dev_priv);
 	case I915_FORMAT_MOD_X_TILED:
-		if (INTEL_INFO(dev_priv)->gen >= 9)
+		if (INTEL_GEN(dev_priv) >= 9)
 			return 256 * 1024;
 		return 0;
 	case I915_FORMAT_MOD_Y_TILED:
 	case I915_FORMAT_MOD_Yf_TILED:
 		return 1 * 1024 * 1024;
 	default:
-		MISSING_CASE(fb_modifier);
+		MISSING_CASE(fb->modifier);
 		return 0;
 	}
 }
@@ -2130,7 +2134,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	alignment = intel_surf_alignment(dev_priv, fb->modifier);
+	alignment = intel_surf_alignment(fb, 0);
 
 	intel_fill_fb_ggtt_view(&view, fb, rotation);
 
@@ -2291,8 +2295,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
 		unsigned int pitch_tiles;
 
 		tile_size = intel_tile_size(dev_priv);
-		intel_tile_dims(dev_priv, &tile_width, &tile_height,
-				fb->modifier, cpp);
+		intel_tile_dims(fb, plane, &tile_width, &tile_height);
 
 		if (drm_rotation_90_or_270(rotation)) {
 			pitch_tiles = pitch / tile_height;
@@ -2347,8 +2350,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
 		unsigned int tile_rows, tiles, pitch_tiles;
 
 		tile_size = intel_tile_size(dev_priv);
-		intel_tile_dims(dev_priv, &tile_width, &tile_height,
-				fb_modifier, cpp);
+		intel_tile_dims(fb, plane, &tile_width, &tile_height);
 
 		if (drm_rotation_90_or_270(rotation)) {
 			pitch_tiles = pitch / tile_height;
@@ -2388,13 +2390,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
 	const struct drm_framebuffer *fb = state->base.fb;
 	unsigned int rotation = state->base.rotation;
 	int pitch = intel_fb_pitch(fb, plane, rotation);
-	u32 alignment;
-
-	/* AUX_DIST needs only 4K alignment */
-	if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
-		alignment = 4096;
-	else
-		alignment = intel_surf_alignment(dev_priv, fb->modifier);
+	u32 alignment = intel_surf_alignment(fb, plane);
 
 	return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
 					  rotation, alignment);
@@ -2458,8 +2454,8 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		 */
 		if (i915_gem_object_is_tiled(intel_fb->obj) &&
 		    (x + width) * cpp > fb->pitches[i]) {
-			DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
-				  i, fb->offsets[i]);
+			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
+				      i, fb->offsets[i]);
 			return -EINVAL;
 		}
 
@@ -2471,7 +2467,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		intel_fb->normal[i].y = y;
 
 		offset = _intel_compute_tile_offset(dev_priv, &x, &y,
-						    fb, 0, fb->pitches[i],
+						    fb, i, fb->pitches[i],
 						    DRM_ROTATE_0, tile_size);
 		offset /= tile_size;
 
@@ -2480,8 +2476,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 			unsigned int pitch_tiles;
 			struct drm_rect r;
 
-			intel_tile_dims(dev_priv, &tile_width, &tile_height,
-					fb->modifier, cpp);
+			intel_tile_dims(fb, i, &tile_width, &tile_height);
 
 			rot_info->plane[i].offset = offset;
 			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
@@ -2542,9 +2537,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		max_size = max(max_size, offset + size);
 	}
 
-	if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
-		DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
-			  max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
+	if (max_size * tile_size > intel_fb->obj->base.size) {
+		DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
+			      max_size * tile_size, intel_fb->obj->base.size);
 		return -EINVAL;
 	}
 
@@ -2846,7 +2841,6 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
 
 static int skl_check_main_surface(struct intel_plane_state *plane_state)
 {
-	const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
 	const struct drm_framebuffer *fb = plane_state->base.fb;
 	unsigned int rotation = plane_state->base.rotation;
 	int x = plane_state->base.src.x1 >> 16;
@@ -2865,8 +2859,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 
 	intel_add_fb_offsets(&x, &y, plane_state, 0);
 	offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
-	alignment = intel_surf_alignment(dev_priv, fb->modifier);
+	alignment = intel_surf_alignment(fb, 0);
 
 	/*
 	 * AUX surface offset is specified as the distance from the
@@ -2983,6 +2976,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	unsigned int rotation = plane_state->base.rotation;
 	int x = plane_state->base.src.x1 >> 16;
 	int y = plane_state->base.src.y1 >> 16;
+	unsigned long irqflags;
 
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -2991,20 +2985,6 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	if (INTEL_GEN(dev_priv) < 4) {
 		if (intel_crtc->pipe == PIPE_B)
 			dspcntr |= DISPPLANE_SEL_PIPE_B;
-
-		/* pipesrc and dspsize control the size that is scaled from,
-		 * which should always be the user's requested size.
-		 */
-		I915_WRITE(DSPSIZE(plane),
-			   ((crtc_state->pipe_src_h - 1) << 16) |
-			   (crtc_state->pipe_src_w - 1));
-		I915_WRITE(DSPPOS(plane), 0);
-	} else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
-		I915_WRITE(PRIMSIZE(plane),
-			   ((crtc_state->pipe_src_h - 1) << 16) |
-			   (crtc_state->pipe_src_w - 1));
-		I915_WRITE(PRIMPOS(plane), 0);
-		I915_WRITE(PRIMCNSTALPHA(plane), 0);
 	}
 
 	switch (fb->format->format) {
@@ -3067,21 +3047,41 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	intel_crtc->adjusted_x = x;
 	intel_crtc->adjusted_y = y;
 
-	I915_WRITE(reg, dspcntr);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+	if (INTEL_GEN(dev_priv) < 4) {
+		/* pipesrc and dspsize control the size that is scaled from,
+		 * which should always be the user's requested size.
+		 */
+		I915_WRITE_FW(DSPSIZE(plane),
+			      ((crtc_state->pipe_src_h - 1) << 16) |
+			      (crtc_state->pipe_src_w - 1));
+		I915_WRITE_FW(DSPPOS(plane), 0);
+	} else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
+		I915_WRITE_FW(PRIMSIZE(plane),
+			      ((crtc_state->pipe_src_h - 1) << 16) |
+			      (crtc_state->pipe_src_w - 1));
+		I915_WRITE_FW(PRIMPOS(plane), 0);
+		I915_WRITE_FW(PRIMCNSTALPHA(plane), 0);
+	}
+
+	I915_WRITE_FW(reg, dspcntr);
+
+	I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_GEN(dev_priv) >= 4) {
-		I915_WRITE(DSPSURF(plane),
-			   intel_plane_ggtt_offset(plane_state) +
-			   intel_crtc->dspaddr_offset);
-		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
-		I915_WRITE(DSPLINOFF(plane), linear_offset);
+		I915_WRITE_FW(DSPSURF(plane),
+			      intel_plane_ggtt_offset(plane_state) +
+			      intel_crtc->dspaddr_offset);
+		I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
+		I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
 	} else {
-		I915_WRITE(DSPADDR(plane),
-			   intel_plane_ggtt_offset(plane_state) +
-			   intel_crtc->dspaddr_offset);
+		I915_WRITE_FW(DSPADDR(plane),
+			      intel_plane_ggtt_offset(plane_state) +
+			      intel_crtc->dspaddr_offset);
 	}
-	POSTING_READ(reg);
+	POSTING_READ_FW(reg);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void i9xx_disable_primary_plane(struct drm_plane *primary,
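
The plane update and disable paths in this file batch their MMIO under dev_priv->uncore.lock and switch to the _FW register accessors. I915_WRITE() takes the uncore lock and handles forcewake on every access; I915_WRITE_FW() does neither, so one lock round-trip now covers the whole register sequence (display plane registers need no forcewake, which is what the conversion appears to rely on). The recurring shape, with REG_A/REG_B as placeholders:

	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(REG_A, val_a);
	I915_WRITE_FW(REG_B, val_b);
	POSTING_READ_FW(REG_B);	/* flush before dropping the lock */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);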
@@ -3091,13 +3091,18 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane;
+	unsigned long irqflags;
 
-	I915_WRITE(DSPCNTR(plane), 0);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	I915_WRITE_FW(DSPCNTR(plane), 0);
 	if (INTEL_INFO(dev_priv)->gen >= 4)
-		I915_WRITE(DSPSURF(plane), 0);
+		I915_WRITE_FW(DSPSURF(plane), 0);
 	else
-		I915_WRITE(DSPADDR(plane), 0);
-	POSTING_READ(DSPCNTR(plane));
+		I915_WRITE_FW(DSPADDR(plane), 0);
+	POSTING_READ_FW(DSPCNTR(plane));
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void ironlake_update_primary_plane(struct drm_plane *primary,
@@ -3115,6 +3120,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 	unsigned int rotation = plane_state->base.rotation;
 	int x = plane_state->base.src.x1 >> 16;
 	int y = plane_state->base.src.y1 >> 16;
+	unsigned long irqflags;
 
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 	dspcntr |= DISPLAY_PLANE_ENABLE;
@@ -3171,31 +3177,32 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 	intel_crtc->adjusted_x = x;
 	intel_crtc->adjusted_y = y;
 
-	I915_WRITE(reg, dspcntr);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	I915_WRITE_FW(reg, dspcntr);
 
-	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
-	I915_WRITE(DSPSURF(plane),
-		   intel_plane_ggtt_offset(plane_state) +
-		   intel_crtc->dspaddr_offset);
+	I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
+	I915_WRITE_FW(DSPSURF(plane),
+		      intel_plane_ggtt_offset(plane_state) +
+		      intel_crtc->dspaddr_offset);
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+		I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
 	} else {
-		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
-		I915_WRITE(DSPLINOFF(plane), linear_offset);
+		I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
+		I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
 	}
-	POSTING_READ(reg);
+	POSTING_READ_FW(reg);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
-			      uint64_t fb_modifier, uint32_t pixel_format)
+static u32
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
 {
-	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
+	if (fb->modifier == DRM_FORMAT_MOD_NONE)
 		return 64;
-	} else {
-		int cpp = drm_format_plane_cpp(pixel_format, 0);
-
-		return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
-	}
+	else
+		return intel_tile_width_bytes(fb, plane);
 }
 
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -3228,21 +3235,21 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
 u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
 		     unsigned int rotation)
 {
-	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
-	u32 stride = intel_fb_pitch(fb, plane, rotation);
+	u32 stride;
+
+	if (plane >= fb->format->num_planes)
+		return 0;
+
+	stride = intel_fb_pitch(fb, plane, rotation);
 
 	/*
 	 * The stride is either expressed as a multiple of 64 bytes chunks for
 	 * linear buffers or in number of tiles for tiled buffers.
 	 */
-	if (drm_rotation_90_or_270(rotation)) {
-		int cpp = fb->format->cpp[plane];
-
-		stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
-	} else {
-		stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
-						    fb->format->format);
-	}
+	if (drm_rotation_90_or_270(rotation))
+		stride /= intel_tile_height(fb, plane);
+	else
+		stride /= intel_fb_stride_alignment(fb, plane);
 
 	return stride;
 }
@@ -3351,15 +3358,11 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	int dst_y = plane_state->base.dst.y1;
 	int dst_w = drm_rect_width(&plane_state->base.dst);
 	int dst_h = drm_rect_height(&plane_state->base.dst);
+	unsigned long irqflags;
 
 	plane_ctl = PLANE_CTL_ENABLE;
 
-	if (IS_GEMINILAKE(dev_priv)) {
-		I915_WRITE(PLANE_COLOR_CTL(pipe, plane_id),
-			   PLANE_COLOR_PIPE_GAMMA_ENABLE |
-			   PLANE_COLOR_PIPE_CSC_ENABLE |
-			   PLANE_COLOR_PLANE_GAMMA_DISABLE);
-	} else {
+	if (!IS_GEMINILAKE(dev_priv)) {
 		plane_ctl |=
 			PLANE_CTL_PIPE_GAMMA_ENABLE |
 			PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3381,10 +3384,19 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	intel_crtc->adjusted_x = src_x;
 	intel_crtc->adjusted_y = src_y;
 
-	I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
-	I915_WRITE(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
-	I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
-	I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	if (IS_GEMINILAKE(dev_priv)) {
+		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+			      PLANE_COLOR_PIPE_GAMMA_ENABLE |
+			      PLANE_COLOR_PIPE_CSC_ENABLE |
+			      PLANE_COLOR_PLANE_GAMMA_DISABLE);
+	}
+
+	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
+	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
+	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
 
 	if (scaler_id >= 0) {
 		uint32_t ps_ctrl = 0;
@@ -3392,19 +3404,21 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 		WARN_ON(!dst_w || !dst_h);
 		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
 			crtc_state->scaler_state.scalers[scaler_id].mode;
-		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
-		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
-		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
-		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
-		I915_WRITE(PLANE_POS(pipe, plane_id), 0);
+		I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+		I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+		I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
+		I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
+		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
 	} else {
-		I915_WRITE(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
+		I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
 	}
 
-	I915_WRITE(PLANE_SURF(pipe, plane_id),
-		   intel_plane_ggtt_offset(plane_state) + surf_addr);
+	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
+		      intel_plane_ggtt_offset(plane_state) + surf_addr);
 
-	POSTING_READ(PLANE_SURF(pipe, plane_id));
+	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void skylake_disable_primary_plane(struct drm_plane *primary,
@@ -3414,10 +3428,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum plane_id plane_id = to_intel_plane(primary)->id;
 	enum pipe pipe = to_intel_plane(primary)->pipe;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
-	I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
-	POSTING_READ(PLANE_SURF(pipe, plane_id));
+	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
+	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 /* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -3474,7 +3493,12 @@ __intel_display_resume(struct drm_device *dev,
 	if (!state)
 		return 0;
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	/*
+	 * We've duplicated the state, pointers to the old state are invalid.
+	 *
+	 * Don't attempt to use the old state until we commit the duplicated state.
+	 */
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 		/*
 		 * Force recalculation even if we restore
 		 * current state. With fast modeset this may not result
@@ -3615,7 +3639,7 @@ static bool abort_flip_on_reset(struct intel_crtc *crtc)
 {
 	struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
 
-	if (i915_reset_in_progress(error))
+	if (i915_reset_backoff(error))
 		return true;
 
 	if (crtc->reset_count != i915_reset_count(error))
@@ -3650,10 +3674,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
 	crtc->base.mode = crtc->base.state->mode;
 
-	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
-		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
-		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
 	/*
 	 * Update pipe size and adjust fitter if needed: the reason for this is
 	 * that in compute_mode_changes we check the native mode (not the pfit
@@ -4775,23 +4795,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc->config->scaler_state;
 
-	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
 	if (crtc->config->pch_pfit.enabled) {
 		int id;
 
-		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
-			DRM_ERROR("Requesting pfit without getting a scaler first\n");
+		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
 			return;
-		}
 
 		id = scaler_state->scaler_id;
 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
-		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
 	}
 }
 
@@ -5022,13 +5036,12 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 	}
 }
 
-static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
+static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
+				   struct intel_crtc_state *pipe_config)
 {
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->base.state);
 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
 	struct drm_plane *primary = crtc->base.primary;
 	struct drm_plane_state *old_pri_state =
@@ -5125,12 +5138,11 @@ static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
 					  struct intel_crtc_state *crtc_state,
 					  struct drm_atomic_state *old_state)
 {
-	struct drm_connector_state *old_conn_state;
+	struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
-		struct drm_connector_state *conn_state = conn->state;
+	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(conn_state->best_encoder);
 
@@ -5146,12 +5158,11 @@ static void intel_encoders_pre_enable(struct drm_crtc *crtc,
 				      struct intel_crtc_state *crtc_state,
 				      struct drm_atomic_state *old_state)
 {
-	struct drm_connector_state *old_conn_state;
+	struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
-		struct drm_connector_state *conn_state = conn->state;
+	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(conn_state->best_encoder);
 
@@ -5167,12 +5178,11 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
 				  struct intel_crtc_state *crtc_state,
 				  struct drm_atomic_state *old_state)
 {
-	struct drm_connector_state *old_conn_state;
+	struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
-		struct drm_connector_state *conn_state = conn->state;
+	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(conn_state->best_encoder);
 
@@ -5192,7 +5202,7 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
 	struct drm_connector *conn;
 	int i;
 
-	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(old_conn_state->best_encoder);
 
@@ -5212,7 +5222,7 @@ static void intel_encoders_post_disable(struct drm_crtc *crtc,
 	struct drm_connector *conn;
 	int i;
 
-	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(old_conn_state->best_encoder);
 
@@ -5232,7 +5242,7 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
 	struct drm_connector *conn;
 	int i;
 
-	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(old_conn_state->best_encoder);
 
@@ -7388,10 +7398,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 	val = I915_READ(DSPSTRIDE(pipe));
 	fb->pitches[0] = val & 0xffffffc0;
 
-	aligned_height = intel_fb_align_height(dev_priv,
-					       fb->height,
-					       fb->format->format,
-					       fb->modifier);
+	aligned_height = intel_fb_align_height(fb, 0, fb->height);
 
 	plane_config->size = fb->pitches[0] * aligned_height;
 
@@ -8426,14 +8433,10 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->width = ((val >> 0) & 0x1fff) + 1;
 
 	val = I915_READ(PLANE_STRIDE(pipe, 0));
-	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier,
-						fb->format->format);
+	stride_mult = intel_fb_stride_alignment(fb, 0);
 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
-	aligned_height = intel_fb_align_height(dev_priv,
-					       fb->height,
-					       fb->format->format,
-					       fb->modifier);
+	aligned_height = intel_fb_align_height(fb, 0, fb->height);
 
 	plane_config->size = fb->pitches[0] * aligned_height;
 
@@ -8529,10 +8532,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 	val = I915_READ(DSPSTRIDE(pipe));
 	fb->pitches[0] = val & 0xffffffc0;
 
-	aligned_height = intel_fb_align_height(dev_priv,
-					       fb->height,
-					       fb->format->format,
-					       fb->modifier);
+	aligned_height = intel_fb_align_height(fb, 0, fb->height);
 
 	plane_config->size = fb->pitches[0] * aligned_height;
 
@@ -9200,24 +9200,24 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
 		/* On these chipsets we can only modify the base/size/stride
 		 * whilst the cursor is disabled.
 		 */
-		I915_WRITE(CURCNTR(PIPE_A), 0);
-		POSTING_READ(CURCNTR(PIPE_A));
+		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
+		POSTING_READ_FW(CURCNTR(PIPE_A));
 		intel_crtc->cursor_cntl = 0;
 	}
 
 	if (intel_crtc->cursor_base != base) {
-		I915_WRITE(CURBASE(PIPE_A), base);
+		I915_WRITE_FW(CURBASE(PIPE_A), base);
 		intel_crtc->cursor_base = base;
 	}
 
 	if (intel_crtc->cursor_size != size) {
-		I915_WRITE(CURSIZE, size);
+		I915_WRITE_FW(CURSIZE, size);
 		intel_crtc->cursor_size = size;
 	}
 
 	if (intel_crtc->cursor_cntl != cntl) {
-		I915_WRITE(CURCNTR(PIPE_A), cntl);
-		POSTING_READ(CURCNTR(PIPE_A));
+		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
+		POSTING_READ_FW(CURCNTR(PIPE_A));
 		intel_crtc->cursor_cntl = cntl;
 	}
 }
@@ -9257,14 +9257,14 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
 	}
 
 	if (intel_crtc->cursor_cntl != cntl) {
-		I915_WRITE(CURCNTR(pipe), cntl);
-		POSTING_READ(CURCNTR(pipe));
+		I915_WRITE_FW(CURCNTR(pipe), cntl);
+		POSTING_READ_FW(CURCNTR(pipe));
 		intel_crtc->cursor_cntl = cntl;
 	}
 
 	/* and commit changes on next vblank */
-	I915_WRITE(CURBASE(pipe), base);
-	POSTING_READ(CURBASE(pipe));
+	I915_WRITE_FW(CURBASE(pipe), base);
+	POSTING_READ_FW(CURBASE(pipe));
 
 	intel_crtc->cursor_base = base;
 }
@@ -9278,6 +9278,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	u32 base = intel_crtc->cursor_addr;
+	unsigned long irqflags;
 	u32 pos = 0;
 
 	if (plane_state) {
@@ -9304,12 +9305,16 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 		}
 	}
 
-	I915_WRITE(CURPOS(pipe), pos);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	I915_WRITE_FW(CURPOS(pipe), pos);
 
 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
 		i845_update_cursor(crtc, base, plane_state);
 	else
 		i9xx_update_cursor(crtc, base, plane_state);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static bool cursor_size_ok(struct drm_i915_private *dev_priv,
@@ -10590,7 +10595,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup;
 
 	intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
-	if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
+	if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
 		ret = -EIO;
 		goto unlock;
 	}
@@ -10663,7 +10668,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		intel_mark_page_flip_active(intel_crtc, work);
 
 		work->flip_queued_req = i915_gem_request_get(request);
-		i915_add_request_no_flush(request);
+		i915_add_request(request);
 	}
 
 	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
@@ -10679,7 +10684,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_request:
-	i915_add_request_no_flush(request);
+	i915_add_request(request);
 cleanup_unpin:
 	to_intel_plane_state(primary->state)->vma = work->old_vma;
 	intel_unpin_fb_vma(vma);
@@ -10907,7 +10912,7 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
 	struct drm_connector_state *connector_state;
 	int i;
 
-	for_each_connector_in_state(state, connector, connector_state, i) {
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
 		if (connector_state->crtc != &crtc->base)
 			continue;
 
@@ -11009,8 +11014,10 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 {
 	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 
-	for_each_intel_connector(dev, connector) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
 		if (connector->base.state->crtc)
 			drm_connector_unreference(&connector->base);
 
@@ -11026,6 +11033,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 			connector->base.state->crtc = NULL;
 		}
 	}
+	drm_connector_list_iter_end(&conn_iter);
 }
 
 static void
@@ -11078,7 +11086,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 	state = pipe_config->base.state;
 
 	/* Clamp display bpp to EDID value */
-	for_each_connector_in_state(state, connector, connector_state, i) {
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
 		if (connector_state->crtc != &crtc->base)
 			continue;
 
@@ -11273,7 +11281,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv =
 		to_i915(crtc_state->base.crtc->dev);
-	struct drm_crtc_state tmp_state;
 	struct intel_crtc_scaler_state scaler_state;
 	struct intel_dpll_hw_state dpll_hw_state;
 	struct intel_shared_dpll *shared_dpll;
@@ -11285,7 +11292,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	 * fixed, so that the crtc_state can be safely duplicated. For now,
 	 * only fields that are known to not cause problems are preserved. */
 
-	tmp_state = crtc_state->base;
 	scaler_state = crtc_state->scaler_state;
 	shared_dpll = crtc_state->shared_dpll;
 	dpll_hw_state = crtc_state->dpll_hw_state;
@@ -11293,9 +11299,11 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		wm_state = crtc_state->wm;
 
-	memset(crtc_state, 0, sizeof *crtc_state);
+	/* Keep base drm_crtc_state intact, only clear our extended struct */
+	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
+	memset(&crtc_state->base + 1, 0,
+	       sizeof(*crtc_state) - sizeof(crtc_state->base));
 
-	crtc_state->base = tmp_state;
 	crtc_state->scaler_state = scaler_state;
 	crtc_state->shared_dpll = shared_dpll;
 	crtc_state->dpll_hw_state = dpll_hw_state;
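
clear_intel_crtc_state() used to copy the whole embedded drm_crtc_state aside, memset the lot, and copy it back; the replacement zeroes only the driver-private tail. &crtc_state->base + 1 points just past the embedded base, and the BUILD_BUG_ON proves base is the first member, so the arithmetic covers exactly the intel-specific fields. The general idiom, sketched on a toy struct:

	struct base { int core; };

	struct outer {
		struct base base;	/* must remain the first member */
		int priv_a;
		void *priv_b;
	};

	static void clear_priv(struct outer *s)
	{
		BUILD_BUG_ON(offsetof(struct outer, base) != 0);
		memset(&s->base + 1, 0, sizeof(*s) - sizeof(s->base));
	}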
@@ -11351,7 +11359,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 			       &pipe_config->pipe_src_w,
 			       &pipe_config->pipe_src_h);
 
-	for_each_connector_in_state(state, connector, connector_state, i) {
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
 		if (connector_state->crtc != crtc)
 			continue;
 
@@ -11382,7 +11390,7 @@ encoder_retry:
 	 * adjust it according to limitations or connector properties, and also
 	 * a chance to reject the mode entirely.
 	 */
-	for_each_connector_in_state(state, connector, connector_state, i) {
+	for_each_new_connector_in_state(state, connector, connector_state, i) {
 		if (connector_state->crtc != crtc)
 			continue;
 
@@ -11434,16 +11442,16 @@ static void
 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
 {
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_crtc_state *new_crtc_state;
 	int i;
 
 	/* Double check state. */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
 
 		/* Update hwmode for vblank functions */
-		if (crtc->state->active)
-			crtc->hwmode = crtc->state->adjusted_mode;
+		if (new_crtc_state->active)
+			crtc->hwmode = new_crtc_state->adjusted_mode;
 		else
 			crtc->hwmode.crtc_clock = 0;
 
@@ -11916,47 +11924,55 @@ verify_connector_state(struct drm_device *dev,
 		       struct drm_crtc *crtc)
 {
 	struct drm_connector *connector;
-	struct drm_connector_state *old_conn_state;
+	struct drm_connector_state *new_conn_state;
 	int i;
 
-	for_each_connector_in_state(state, connector, old_conn_state, i) {
+	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
 		struct drm_encoder *encoder = connector->encoder;
-		struct drm_connector_state *state = connector->state;
 
-		if (state->crtc != crtc)
+		if (new_conn_state->crtc != crtc)
 			continue;
 
 		intel_connector_verify_state(to_intel_connector(connector));
 
-		I915_STATE_WARN(state->best_encoder != encoder,
+		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
 		     "connector's atomic encoder doesn't match legacy encoder\n");
 	}
 }
 
 static void
-verify_encoder_state(struct drm_device *dev)
+verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
 {
 	struct intel_encoder *encoder;
-	struct intel_connector *connector;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state, *new_conn_state;
+	int i;
 
 	for_each_intel_encoder(dev, encoder) {
-		bool enabled = false;
+		bool enabled = false, found = false;
 		enum pipe pipe;
 
 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
 
-		for_each_intel_connector(dev, connector) {
-			if (connector->base.state->best_encoder != &encoder->base)
+		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
+						   new_conn_state, i) {
+			if (old_conn_state->best_encoder == &encoder->base)
+				found = true;
+
+			if (new_conn_state->best_encoder != &encoder->base)
 				continue;
-			enabled = true;
+			found = enabled = true;
 
-			I915_STATE_WARN(connector->base.state->crtc !=
+			I915_STATE_WARN(new_conn_state->crtc !=
 					encoder->base.crtc,
 			     "connector's crtc doesn't match encoder crtc\n");
 		}
 
+		if (!found)
+			continue;
+
 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
 		     "encoder's enabled state mismatch "
 		     "(expected %i, found %i)\n",
@@ -12157,7 +12173,7 @@ static void
 intel_modeset_verify_disabled(struct drm_device *dev,
 			      struct drm_atomic_state *state)
 {
-	verify_encoder_state(dev);
+	verify_encoder_state(dev, state);
 	verify_connector_state(dev, state, NULL);
 	verify_disabled_dpll_state(dev);
 }
@@ -12205,21 +12221,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	int i;
 
 	if (!dev_priv->display.crtc_compute_clock)
 		return;
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		struct intel_shared_dpll *old_dpll =
-			to_intel_crtc_state(crtc->state)->shared_dpll;
+			to_intel_crtc_state(old_crtc_state)->shared_dpll;
 
-		if (!needs_modeset(crtc_state))
+		if (!needs_modeset(new_crtc_state))
 			continue;
 
-		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
+		to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
 
 		if (!old_dpll)
 			continue;
@@ -12245,7 +12261,7 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
 	int i;
 
 	/* look at all crtc's that are going to be enabled in during modeset */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 		intel_crtc = to_intel_crtc(crtc);
 
 		if (!crtc_state->active || !needs_modeset(crtc_state))
@@ -12347,7 +12363,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_i915_private *dev_priv = to_i915(state->dev);
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	int ret = 0, i;
 
 	if (!check_digital_port_conflicts(state)) {
@@ -12360,13 +12376,13 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
 	intel_state->cdclk.logical = dev_priv->cdclk.logical;
 	intel_state->cdclk.actual = dev_priv->cdclk.actual;
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (crtc_state->active)
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		if (new_crtc_state->active)
 			intel_state->active_crtcs |= 1 << i;
 		else
 			intel_state->active_crtcs &= ~(1 << i);
 
-		if (crtc_state->active != crtc->state->active)
+		if (old_crtc_state->active != new_crtc_state->active)
 			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
 	}
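
The index i handed back by the state iterators follows drm_crtc_index(), the same numbering drm_crtc_mask() uses, so the 1 << i bitmask for active_crtcs and the drm_crtc_mask(crtc) bitmask for active_pipe_changes line up. Restated with that made explicit:

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* i == drm_crtc_index(crtc), so BIT(i) == drm_crtc_mask(crtc) */
		if (new_crtc_state->active)
			intel_state->active_crtcs |= BIT(i);
		else
			intel_state->active_crtcs &= ~BIT(i);
	}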
 
@@ -12445,7 +12461,7 @@ static int intel_atomic_check(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_crtc_state *old_crtc_state, *crtc_state;
 	int ret, i;
 	bool any_ms = false;
 
@@ -12453,12 +12469,12 @@ static int intel_atomic_check(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
 		struct intel_crtc_state *pipe_config =
 			to_intel_crtc_state(crtc_state);
 
 		/* Catch I915_MODE_FLAG_INHERITED */
-		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+		if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags)
 			crtc_state->mode_changed = true;
 
 		if (!needs_modeset(crtc_state))
@@ -12485,10 +12501,10 @@ static int intel_atomic_check(struct drm_device *dev,
 
 		if (i915.fastboot &&
 		    intel_pipe_config_compare(dev_priv,
-					to_intel_crtc_state(crtc->state),
+					to_intel_crtc_state(old_crtc_state),
 					pipe_config, true)) {
 			crtc_state->mode_changed = false;
-			to_intel_crtc_state(crtc_state)->update_pipe = true;
+			pipe_config->update_pipe = true;
 		}
 
 		if (needs_modeset(crtc_state))
@@ -12528,7 +12544,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 	struct drm_crtc *crtc;
 	int i, ret;
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 		if (state->legacy_cursor_update)
 			continue;
 
@@ -12625,19 +12641,21 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
 static void intel_update_crtc(struct drm_crtc *crtc,
 			      struct drm_atomic_state *state,
 			      struct drm_crtc_state *old_crtc_state,
+			      struct drm_crtc_state *new_crtc_state,
 			      unsigned int *crtc_vblank_mask)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
-	bool modeset = needs_modeset(crtc->state);
+	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
+	bool modeset = needs_modeset(new_crtc_state);
 
 	if (modeset) {
 		update_scanline_offset(intel_crtc);
 		dev_priv->display.crtc_enable(pipe_config, state);
 	} else {
-		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
+				       pipe_config);
 	}
 
 	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
@@ -12656,15 +12674,15 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
 			       unsigned int *crtc_vblank_mask)
 {
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	int i;
 
-	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
-		if (!crtc->state->active)
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		if (!new_crtc_state->active)
 			continue;
 
 		intel_update_crtc(crtc, state, old_crtc_state,
-				  crtc_vblank_mask);
+				  new_crtc_state, crtc_vblank_mask);
 	}
 }
 
@@ -12675,7 +12693,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;
-	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct intel_crtc_state *cstate;
 	unsigned int updated = 0;
 	bool progress;
@@ -12684,9 +12702,9 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 
 	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
 
-	for_each_crtc_in_state(state, crtc, old_crtc_state, i)
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
 		/* ignore allocations for crtc's that have been turned off. */
-		if (crtc->state->active)
+		if (new_crtc_state->active)
 			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
 
 	/*
@@ -12698,7 +12716,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 	do {
 		progress = false;
 
-		for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 			bool vbl_wait = false;
 			unsigned int cmask = drm_crtc_mask(crtc);
 
@@ -12723,12 +12741,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 			 */
 			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
 						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
-			    !crtc->state->active_changed &&
+			    !new_crtc_state->active_changed &&
 			    intel_state->wm_results.dirty_pipes != updated)
 				vbl_wait = true;
 
 			intel_update_crtc(crtc, state, old_crtc_state,
-					  crtc_vblank_mask);
+					  new_crtc_state, crtc_vblank_mask);
 
 			if (vbl_wait)
 				intel_wait_for_vblank(dev_priv, pipe);
@@ -12761,7 +12779,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	struct drm_device *dev = state->dev;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_crtc *crtc;
 	struct intel_crtc_state *intel_cstate;
 	bool hw_check = intel_state->modeset;
@@ -12774,22 +12792,23 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	if (intel_state->modeset)
 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
-	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-		if (needs_modeset(crtc->state) ||
-		    to_intel_crtc_state(crtc->state)->update_pipe) {
+		if (needs_modeset(new_crtc_state) ||
+		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
 			hw_check = true;
 
 			put_domains[to_intel_crtc(crtc)->pipe] =
 				modeset_get_crtc_power_domains(crtc,
-					to_intel_crtc_state(crtc->state));
+					to_intel_crtc_state(new_crtc_state));
 		}
 
-		if (!needs_modeset(crtc->state))
+		if (!needs_modeset(new_crtc_state))
 			continue;
 
-		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
+				       to_intel_crtc_state(new_crtc_state));
 
 		if (old_crtc_state->active) {
 			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
@@ -12839,16 +12858,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 
 	/* Complete the events for pipes that have now been disabled */
-	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
-		bool modeset = needs_modeset(crtc->state);
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		bool modeset = needs_modeset(new_crtc_state);
 
 		/* Complete events for the now disabled pipes here. */
-		if (modeset && !crtc->state->active && crtc->state->event) {
+		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
 			spin_lock_irq(&dev->event_lock);
-			drm_crtc_send_vblank_event(crtc, crtc->state->event);
+			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
 			spin_unlock_irq(&dev->event_lock);
 
-			crtc->state->event = NULL;
+			new_crtc_state->event = NULL;
 		}
 	}
 
@@ -12874,21 +12893,21 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 *
 	 * TODO: Move this (and other cleanup) to an async worker eventually.
 	 */
-	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
-		intel_cstate = to_intel_crtc_state(crtc->state);
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		intel_cstate = to_intel_crtc_state(new_crtc_state);
 
 		if (dev_priv->display.optimize_watermarks)
 			dev_priv->display.optimize_watermarks(intel_state,
 							      intel_cstate);
 	}
 
-	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
 
 		if (put_domains[i])
 			modeset_put_power_domains(dev_priv, put_domains[i]);
 
-		intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
+		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
 	}
 
 	if (intel_state->modeset && intel_can_enable_sagv(state))
@@ -12960,13 +12979,13 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
 
 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
 {
-	struct drm_plane_state *old_plane_state;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct drm_plane *plane;
 	int i;
 
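+	/* transfer frontbuffer tracking bits from each old fb to its new fb */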
-	for_each_plane_in_state(state, plane, old_plane_state, i)
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
 		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
-				  intel_fb_obj(plane->state->fb),
+				  intel_fb_obj(new_plane_state->fb),
 				  to_intel_plane(plane)->frontbuffer_bit);
 }
 
@@ -13344,17 +13363,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 		to_intel_atomic_state(old_crtc_state->state);
 	bool modeset = needs_modeset(crtc->state);
 
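+	/* Program CSC/LUTs before vblank evasion; loading them can be slow. */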
+	if (!modeset &&
+	    (intel_cstate->base.color_mgmt_changed ||
+	     intel_cstate->update_pipe)) {
+		intel_color_set_csc(crtc->state);
+		intel_color_load_luts(crtc->state);
+	}
+
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(intel_crtc);
 
 	if (modeset)
 		goto out;
 
-	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
-		intel_color_set_csc(crtc->state);
-		intel_color_load_luts(crtc->state);
-	}
-
 	if (intel_cstate->update_pipe)
 		intel_update_pipe_config(intel_crtc, old_intel_cstate);
 	else if (INTEL_GEN(dev_priv) >= 9)
@@ -13917,15 +13938,14 @@ fail:
 
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
 {
-	struct drm_encoder *encoder = connector->base.encoder;
 	struct drm_device *dev = connector->base.dev;
 
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	if (!encoder || WARN_ON(!encoder->crtc))
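+	/* connector->state is protected by connection_mutex, asserted above */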
+	if (!connector->base.state->crtc)
 		return INVALID_PIPE;
 
-	return to_intel_crtc(encoder->crtc)->pipe;
+	return to_intel_crtc(connector->base.state->crtc)->pipe;
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -14326,14 +14346,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		 */
 		if (tiling != I915_TILING_NONE &&
 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
-			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
+			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
 			goto err;
 		}
 	} else {
 		if (tiling == I915_TILING_X) {
 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
 		} else if (tiling == I915_TILING_Y) {
-			DRM_DEBUG("No Y tiling for legacy addfb\n");
+			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
 			goto err;
 		}
 	}
@@ -14343,16 +14363,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	case I915_FORMAT_MOD_Y_TILED:
 	case I915_FORMAT_MOD_Yf_TILED:
 		if (INTEL_GEN(dev_priv) < 9) {
-			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
-				  mode_cmd->modifier[0]);
+			DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
+				      mode_cmd->modifier[0]);
 			goto err;
 		}
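 		/* fall through */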
 	case DRM_FORMAT_MOD_NONE:
 	case I915_FORMAT_MOD_X_TILED:
 		break;
 	default:
-		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
-			  mode_cmd->modifier[0]);
+		DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+			      mode_cmd->modifier[0]);
 		goto err;
 	}
 
@@ -14362,26 +14382,17 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	 */
 	if (INTEL_INFO(dev_priv)->gen < 4 &&
 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
-		DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
-		goto err;
-	}
-
-	stride_alignment = intel_fb_stride_alignment(dev_priv,
-						     mode_cmd->modifier[0],
-						     mode_cmd->pixel_format);
-	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
-		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
-			  mode_cmd->pitches[0], stride_alignment);
+		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
 		goto err;
 	}
 
 	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
 					   mode_cmd->pixel_format);
 	if (mode_cmd->pitches[0] > pitch_limit) {
-		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
-			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
-			  "tiled" : "linear",
-			  mode_cmd->pitches[0], pitch_limit);
+		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
+			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+			      "tiled" : "linear",
+			      mode_cmd->pitches[0], pitch_limit);
 		goto err;
 	}
 
@@ -14389,9 +14400,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	 * If there's a fence, enforce that
 	 * the fb pitch and fence stride match.
 	 */
-	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] !=  stride) {
-		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
-			  mode_cmd->pitches[0], stride);
+	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
+			      mode_cmd->pitches[0], stride);
 		goto err;
 	}
 
@@ -14404,16 +14415,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		break;
 	case DRM_FORMAT_XRGB1555:
 		if (INTEL_GEN(dev_priv) > 3) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-			          drm_get_format_name(mode_cmd->pixel_format, &format_name));
+			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
 			goto err;
 		}
 		break;
 	case DRM_FORMAT_ABGR8888:
 		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
 		    INTEL_GEN(dev_priv) < 9) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-			          drm_get_format_name(mode_cmd->pixel_format, &format_name));
+			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
 			goto err;
 		}
 		break;
@@ -14421,15 +14432,15 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_XBGR2101010:
 		if (INTEL_GEN(dev_priv) < 4) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-			          drm_get_format_name(mode_cmd->pixel_format, &format_name));
+			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
 			goto err;
 		}
 		break;
 	case DRM_FORMAT_ABGR2101010:
 		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-			          drm_get_format_name(mode_cmd->pixel_format, &format_name));
+			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
 			goto err;
 		}
 		break;
@@ -14438,14 +14449,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_VYUY:
 		if (INTEL_GEN(dev_priv) < 5) {
-			DRM_DEBUG("unsupported pixel format: %s\n",
-			          drm_get_format_name(mode_cmd->pixel_format, &format_name));
+			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
 			goto err;
 		}
 		break;
 	default:
-		DRM_DEBUG("unsupported pixel format: %s\n",
-		          drm_get_format_name(mode_cmd->pixel_format, &format_name));
+		DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+			      drm_get_format_name(mode_cmd->pixel_format, &format_name));
 		goto err;
 	}
 
@@ -14455,6 +14466,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 
 	drm_helper_mode_fill_fb_struct(&dev_priv->drm,
 				       &intel_fb->base, mode_cmd);
+
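+	/* stride alignment depends on the filled-in fb, so check it only here */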
+	stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0);
+	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
+		DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n",
+			      mode_cmd->pitches[0], stride_alignment);
+		goto err;
+	}
+
 	intel_fb->obj = obj;
 
 	ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
@@ -14908,7 +14927,7 @@ retry:
 	}
 
 	/* Write calculated watermark values back */
-	for_each_crtc_in_state(state, crtc, cstate, i) {
+	for_each_new_crtc_in_state(state, crtc, cstate, i) {
 		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
 
 		cs->wm.need_postvbl_update = true;
@@ -15060,6 +15079,7 @@ int intel_modeset_init(struct drm_device *dev)
 static void intel_enable_pipe_a(struct drm_device *dev)
 {
 	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	struct drm_connector *crt = NULL;
 	struct intel_load_detect_pipe load_detect_temp;
 	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
@@ -15067,12 +15087,14 @@ static void intel_enable_pipe_a(struct drm_device *dev)
 	/* We can't just switch on pipe A; we need to set things up with a
 	 * proper mode and output configuration. As a gross hack, enable pipe A
 	 * by enabling the load detect pipe once. */
-	for_each_intel_connector(dev, connector) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
 		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
 			crt = &connector->base;
 			break;
 		}
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	if (!crt)
 		return;
@@ -15318,6 +15340,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
 	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	int i;
 
 	dev_priv->active_crtcs = 0;
@@ -15388,7 +15411,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			      pipe_name(pipe));
 	}
 
-	for_each_intel_connector(dev, connector) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
 		if (connector->get_hw_state(connector)) {
 			connector->base.dpms = DRM_MODE_DPMS_ON;
 
@@ -15416,6 +15440,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			      connector->base.base.id, connector->base.name,
 			      enableddisabled(connector->base.encoder));
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	for_each_intel_crtc(dev, crtc) {
 		struct intel_crtc_state *crtc_state =
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0f766f83a31b..51228fe4283b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1188,7 +1188,13 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
-u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
+
+static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
+					    u32 mask)
+{
+	return mask & ~i915->rps.pm_intrmsk_mbz;
+}
+
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
 static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
@@ -1239,8 +1245,6 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 				 struct intel_crtc *intel_crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config);
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
 
 void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
 void intel_ddi_clock_get(struct intel_encoder *encoder,
@@ -1250,12 +1254,8 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
 
-unsigned int intel_fb_align_height(struct drm_i915_private *dev_priv,
-				   unsigned int height,
-				   uint32_t pixel_format,
-				   uint64_t fb_format_modifier);
-u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
-			      uint64_t fb_modifier, uint32_t pixel_format);
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+				   int plane, unsigned int height);
 
 /* intel_audio.c */
 void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
@@ -1269,6 +1269,10 @@ void intel_audio_init(struct drm_i915_private *dev_priv);
 void intel_audio_deinit(struct drm_i915_private *dev_priv);
 
 /* intel_cdclk.c */
+void skl_init_cdclk(struct drm_i915_private *dev_priv);
+void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+void bxt_init_cdclk(struct drm_i915_private *dev_priv);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
 void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
 void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
 void intel_update_cdclk(struct drm_i915_private *dev_priv);
@@ -1380,9 +1384,6 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 				    struct drm_plane_state *plane_state);
 
-unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
-			       uint64_t fb_modifier, unsigned int cpp);
-
 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
 				    enum pipe pipe);
 
@@ -1414,14 +1415,10 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv);
 void intel_finish_reset(struct drm_i915_private *dev_priv);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_init_cdclk(struct drm_i915_private *dev_priv);
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-void skl_init_cdclk(struct drm_i915_private *dev_priv);
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
 unsigned int skl_cdclk_get_vco(unsigned int freq);
 void skl_enable_dc6(struct drm_i915_private *dev_priv);
 void skl_disable_dc6(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 323fc097c3ee..3ffe8b1f1d48 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -28,7 +28,6 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/i915_drm.h>
-#include <drm/drm_panel.h>
 #include <drm/drm_mipi_dsi.h>
 #include <linux/slab.h>
 #include <linux/gpio/consumer.h>
@@ -36,16 +35,6 @@
 #include "intel_drv.h"
 #include "intel_dsi.h"
 
-static const struct {
-	u16 panel_id;
-	struct drm_panel * (*init)(struct intel_dsi *intel_dsi, u16 panel_id);
-} intel_dsi_drivers[] = {
-	{
-		.panel_id = MIPI_DSI_GENERIC_PANEL_ID,
-		.init = vbt_panel_init,
-	},
-};
-
 /* return pixels in terms of txbyteclkhs */
 static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
 		       u16 burst_mode_ratio)
@@ -817,55 +806,58 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 	/* Power on, try both CRC pmic gpio and VBT */
 	if (intel_dsi->gpio_panel)
 		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
-	intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
 	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
 
 	/* Deassert reset */
-	intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
 
 	/* Put device in ready state (LP-11) */
 	intel_dsi_device_ready(encoder);
 
 	/* Send initialization commands in LP mode */
-	intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
 
 	/* Enable the port in the pre-enable phase itself because, per hw team
 	 * recommendation, the port should be enabled before plane & pipe */
 	if (is_cmd_mode(intel_dsi)) {
 		for_each_dsi_port(port, intel_dsi->ports)
 			I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
-		intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
-		intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
+		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 	} else {
 		msleep(20); /* XXX */
 		for_each_dsi_port(port, intel_dsi->ports)
 			dpi_send_cmd(intel_dsi, TURN_ON, false, port);
 		intel_dsi_msleep(intel_dsi, 100);
 
-		intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 
 		intel_dsi_port_enable(encoder);
 	}
 
 	intel_panel_enable_backlight(intel_dsi->attached_connector);
-	intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
 }
 
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook.
+ */
 static void intel_dsi_enable_nop(struct intel_encoder *encoder,
 				 struct intel_crtc_state *pipe_config,
 				 struct drm_connector_state *conn_state)
 {
 	DRM_DEBUG_KMS("\n");
-
-	/* for DSI port enable has to be done before pipe
-	 * and plane enable, so port enable is done in
-	 * pre_enable phase itself unlike other encoders
-	 */
 }
 
-static void intel_dsi_pre_disable(struct intel_encoder *encoder,
-				  struct intel_crtc_state *old_crtc_state,
-				  struct drm_connector_state *old_conn_state)
+/*
+ * DSI port disable has to be done after pipe and plane disable, so we do it in
+ * the post_disable hook.
+ */
+static void intel_dsi_disable(struct intel_encoder *encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -874,7 +866,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder,
 
 	DRM_DEBUG_KMS("\n");
 
-	intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
 	intel_panel_disable_backlight(intel_dsi->attached_connector);
 
 	/*
@@ -936,8 +928,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 	 * a subsequent enable sequence a "send turn on packet" error is observed
 	 */
 	if (is_cmd_mode(intel_dsi))
-		intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_TEAR_OFF);
-	intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
+		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_OFF);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
 
 	/* Transition to LP-00 */
 	intel_dsi_clear_device_ready(encoder);
@@ -964,11 +956,11 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 	}
 
 	/* Assert reset */
-	intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
 
 	/* Power off, try both CRC pmic gpio and VBT */
 	intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
-	intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
 	if (intel_dsi->gpio_panel)
 		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
 
@@ -1652,12 +1644,6 @@ static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
-	if (intel_dsi->panel) {
-		drm_panel_detach(intel_dsi->panel);
-		/* XXX: Logically this call belongs in the panel driver. */
-		drm_panel_remove(intel_dsi->panel);
-	}
-
 	/* dispose of the gpios */
 	if (intel_dsi->gpio_panel)
 		gpiod_put(intel_dsi->gpio_panel);
@@ -1709,7 +1695,6 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
 	struct drm_connector *connector;
 	struct drm_display_mode *scan, *fixed_mode = NULL;
 	enum port port;
-	unsigned int i;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -1748,7 +1733,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
 	intel_encoder->enable = intel_dsi_enable_nop;
-	intel_encoder->disable = intel_dsi_pre_disable;
+	intel_encoder->disable = intel_dsi_disable;
 	intel_encoder->post_disable = intel_dsi_post_disable;
 	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
 	intel_encoder->get_config = intel_dsi_get_config;
@@ -1816,14 +1801,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
 		intel_dsi->dsi_hosts[port] = host;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(intel_dsi_drivers); i++) {
-		intel_dsi->panel = intel_dsi_drivers[i].init(intel_dsi,
-							     intel_dsi_drivers[i].panel_id);
-		if (intel_dsi->panel)
-			break;
-	}
-
-	if (!intel_dsi->panel) {
+	if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
 		DRM_DEBUG_KMS("no device found\n");
 		goto err;
 	}
@@ -1857,10 +1835,8 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 
-	drm_panel_attach(intel_dsi->panel, connector);
-
 	mutex_lock(&dev->mode_config.mutex);
-	drm_panel_get_modes(intel_dsi->panel);
+	intel_dsi_vbt_get_modes(intel_dsi);
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
 			fixed_mode = drm_mode_duplicate(dev, scan);
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 548649158abd..7afeb9580f41 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -39,7 +39,6 @@ struct intel_dsi_host;
 struct intel_dsi {
 	struct intel_encoder base;
 
-	struct drm_panel *panel;
 	struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
 
 	/* GPIO Desc for CRC based Panel control */
@@ -130,11 +129,11 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 	return container_of(encoder, struct intel_dsi, base.base);
 }
 
+/* intel_dsi.c */
 void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
 
-void intel_dsi_exec_vbt_sequence(struct intel_dsi *intel_dsi,
-				 enum mipi_seq seq_id);
-
+/* intel_dsi_pll.c */
 bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
 int intel_compute_dsi_pll(struct intel_encoder *encoder,
 			  struct intel_crtc_state *config);
@@ -146,7 +145,10 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 void intel_dsi_reset_clocks(struct intel_encoder *encoder,
 			    enum port port);
 
-struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+/* intel_dsi_vbt.c */
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+				 enum mipi_seq seq_id);
 
 #endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index ab922b6eb36a..0dce7792643a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -28,7 +28,6 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/i915_drm.h>
-#include <drm/drm_panel.h>
 #include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 #include <video/mipi_display.h>
@@ -38,16 +37,6 @@
 #include "intel_drv.h"
 #include "intel_dsi.h"
 
-struct vbt_panel {
-	struct drm_panel panel;
-	struct intel_dsi *intel_dsi;
-};
-
-static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
-{
-	return container_of(panel, struct vbt_panel, panel);
-}
-
 #define MIPI_TRANSFER_MODE_SHIFT	0
 #define MIPI_VIRTUAL_CHANNEL_SHIFT	1
 #define MIPI_PORT_SHIFT			3
@@ -426,7 +415,7 @@ static const char *sequence_name(enum mipi_seq seq_id)
 		return "(unknown)";
 }
 
-void intel_dsi_exec_vbt_sequence(struct intel_dsi *intel_dsi,
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 				 enum mipi_seq seq_id)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
@@ -492,40 +481,31 @@ void intel_dsi_exec_vbt_sequence(struct intel_dsi *intel_dsi,
 	}
 }
 
-static int vbt_panel_get_modes(struct drm_panel *panel)
+int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
 {
-	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+	struct intel_connector *connector = intel_dsi->attached_connector;
 	struct drm_device *dev = intel_dsi->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_display_mode *mode;
 
-	if (!panel->connector)
-		return 0;
-
 	mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
 	if (!mode)
 		return 0;
 
 	mode->type |= DRM_MODE_TYPE_PREFERRED;
 
-	drm_mode_probed_add(panel->connector, mode);
+	drm_mode_probed_add(&connector->base, mode);
 
 	return 1;
 }
 
-static const struct drm_panel_funcs vbt_panel_funcs = {
-	.get_modes = vbt_panel_get_modes,
-};
-
-struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
 	struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
 	struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
-	struct vbt_panel *vbt_panel;
 	u32 bpp;
 	u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
 	u32 ui_num, ui_den;
@@ -588,7 +568,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
 			if (mipi_config->target_burst_mode_freq <
 								computed_ddr) {
 				DRM_ERROR("Burst mode freq is less than computed\n");
-				return NULL;
+				return false;
 			}
 
 			burst_mode_ratio = DIV_ROUND_UP(
@@ -598,7 +578,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
 			pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
 		} else {
 			DRM_ERROR("Burst mode target is not set\n");
-			return NULL;
+			return false;
 		}
 	} else
 		burst_mode_ratio = 100;
@@ -809,20 +789,10 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
 	intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
 	intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
 
-	/* This is cheating a bit with the cleanup. */
-	vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
-	if (!vbt_panel)
-		return NULL;
-
-	vbt_panel->intel_dsi = intel_dsi;
-	drm_panel_init(&vbt_panel->panel);
-	vbt_panel->panel.funcs = &vbt_panel_funcs;
-	drm_panel_add(&vbt_panel->panel);
-
 	/* a regular driver would get the device in probe */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
 	}
 
-	return &vbt_panel->panel;
+	return true;
 }
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 73fe718516a5..4200faa520c7 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
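+	/* execlists notifies listeners (e.g. GVT-g) of context status changes */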
+	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
 	dev_priv->engine[id] = engine;
 	return 0;
 }
@@ -191,6 +193,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
 			goto cleanup;
 		}
 
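+		/* every backend must have installed a submit_request by now */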
+		GEM_BUG_ON(!engine->submit_request);
 		mask |= ENGINE_MASK(id);
 	}
 
@@ -248,8 +251,7 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 	}
 
 	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-	if (engine->irq_seqno_barrier)
-		engine->irq_seqno_barrier(engine);
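+	/* discard any breadcrumb interrupt that predates the new seqno */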
+	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
 
 	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
 	engine->hangcheck.seqno = seqno;
@@ -342,6 +344,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 {
 	int ret;
 
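+	/* install the backend's default request submission method */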
+	engine->set_default_submission(engine);
+
 	/* We may need to do things with the shrinker which
 	 * require us to immediately switch back to the default
 	 * context. This can cause a problem as pinning the
@@ -1115,6 +1119,15 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
 	return true;
 }
 
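+/* revert engines to their default submission method, e.g. after a reset */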
+void intel_engines_reset_default_submission(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, i915, id)
+		engine->set_default_submission(engine);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 17d418b23d77..ded2add18b26 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1061,7 +1061,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
 	 * plane. We could go for fancier schemes such as checking the plane
 	 * size, but this would just affect the few platforms that don't tie FBC
 	 * to pipe or plane A. */
-	for_each_plane_in_state(state, plane, plane_state, i) {
+	for_each_new_plane_in_state(state, plane, plane_state, i) {
 		struct intel_plane_state *intel_plane_state =
 			to_intel_plane_state(plane_state);
 		struct intel_crtc_state *intel_crtc_state;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f7e9a4e69595..332254a8eebe 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -619,9 +619,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 		}
 
 		cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
-		cur_size = intel_fb_align_height(to_i915(dev), cur_size,
-						 fb->base.format->format,
-						 fb->base.modifier);
+		cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
 		cur_size *= fb->base.pitches[0];
 		DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
 			      pipe_name(intel_crtc->pipe),
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 9885f760f2ef..2f270d02894c 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -26,7 +26,6 @@
  *    Dave Gordon <david.s.gordon@intel.com>
  *    Alex Dai <yu.dai@intel.com>
  */
-#include <linux/firmware.h>
 #include "i915_drv.h"
 #include "intel_uc.h"
 
@@ -91,70 +90,6 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
 	}
 };
 
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int irqs;
-
-	/* tell all command streamers NOT to forward interrupts or vblank to GuC */
-	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
-	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-	for_each_engine(engine, dev_priv, id)
-		I915_WRITE(RING_MODE_GEN7(engine), irqs);
-
-	/* route all GT interrupts to the host */
-	I915_WRITE(GUC_BCS_RCS_IER, 0);
-	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
-	I915_WRITE(GUC_WD_VECS_IER, 0);
-}
-
-static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int irqs;
-	u32 tmp;
-
-	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
-	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-	for_each_engine(engine, dev_priv, id)
-		I915_WRITE(RING_MODE_GEN7(engine), irqs);
-
-	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
-	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-	/* These three registers have the same bit definitions */
-	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
-	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
-	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
-
-	/*
-	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
-	 * (unmasked) PM interrupts to the GuC. All other bits of this
-	 * register *disable* generation of a specific interrupt.
-	 *
-	 * 'pm_intr_keep' indicates bits that are NOT to be set when
-	 * writing to the PM interrupt mask register, i.e. interrupts
-	 * that must not be disabled.
-	 *
-	 * If the GuC is handling these interrupts, then we must not let
-	 * the PM code disable ANY interrupt that the GuC is expecting.
-	 * So for each ENABLED (0) bit in this register, we must SET the
-	 * bit in pm_intr_keep so that it's left enabled for the GuC.
-	 *
-	 * OTOH the REDIRECT_TO_GUC bit is initially SET in pm_intr_keep
-	 * (so interrupts go to the DISPLAY unit at first); but here we
-	 * need to CLEAR that bit, which will result in the register bit
-	 * being left SET!
-	 */
-	tmp = I915_READ(GEN6_PMINTRMSK);
-	if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
-		dev_priv->rps.pm_intr_keep |= ~tmp;
-		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
-	}
-}
-
 static u32 get_gttype(struct drm_i915_private *dev_priv)
 {
 	/* XXX: GT type based on PCI device ID? field seems unused by fw */
@@ -409,378 +344,91 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-static int guc_hw_reset(struct drm_i915_private *dev_priv)
-{
-	int ret;
-	u32 guc_status;
-
-	ret = intel_guc_reset(dev_priv);
-	if (ret) {
-		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
-		return ret;
-	}
-
-	guc_status = I915_READ(GUC_STATUS);
-	WARN(!(guc_status & GS_MIA_IN_RESET),
-	     "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
-
-	return ret;
-}
-
 /**
- * intel_guc_setup() - finish preparing the GuC for activity
- * @dev_priv:	i915 device private
+ * intel_guc_init_hw() - finish preparing the GuC for activity
+ * @guc: intel_guc structure
  *
- * Called from gem_init_hw() during driver loading and also after a GPU reset.
+ * Called during driver loading and also after a GPU reset.
  *
  * The main action required here is to load the GuC uCode into the device.
  * The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_init(), so here we need only check that worked,
- * and then transfer the image to the h/w.
+ * earlier call to intel_guc_init(), so here we need only check that
+ * worked, and then transfer the image to the h/w.
  *
  * Return:	non-zero code on error
  */
-int intel_guc_setup(struct drm_i915_private *dev_priv)
+int intel_guc_init_hw(struct intel_guc *guc)
 {
-	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
-	const char *fw_path = guc_fw->path;
-	int retries, ret, err;
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	const char *fw_path = guc->fw.path;
+	int ret;
 
 	DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
 		fw_path,
-		intel_uc_fw_status_repr(guc_fw->fetch_status),
-		intel_uc_fw_status_repr(guc_fw->load_status));
-
-	/* Loading forbidden, or no firmware to load? */
-	if (!i915.enable_guc_loading) {
-		err = 0;
-		goto fail;
-	} else if (fw_path == NULL) {
-		/* Device is known to have no uCode (e.g. no GuC) */
-		err = -ENXIO;
-		goto fail;
-	} else if (*fw_path == '\0') {
-		/* Device has a GuC but we don't know what f/w to load? */
-		WARN(1, "No GuC firmware known for this platform!\n");
-		err = -ENODEV;
-		goto fail;
-	}
-
-	/* Fetch failed, or already fetched but failed to load? */
-	if (guc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) {
-		err = -EIO;
-		goto fail;
-	} else if (guc_fw->load_status == INTEL_UC_FIRMWARE_FAIL) {
-		err = -ENOEXEC;
-		goto fail;
-	}
+		intel_uc_fw_status_repr(guc->fw.fetch_status),
+		intel_uc_fw_status_repr(guc->fw.load_status));
 
-	guc_interrupts_release(dev_priv);
-	gen9_reset_guc_interrupts(dev_priv);
+	if (guc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+		return -EIO;
 
-	/* We need to notify the guc whenever we change the GGTT */
-	i915_ggtt_enable_guc(dev_priv);
-
-	guc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+	guc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
 
 	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
-		intel_uc_fw_status_repr(guc_fw->fetch_status),
-		intel_uc_fw_status_repr(guc_fw->load_status));
-
-	err = i915_guc_submission_init(dev_priv);
-	if (err)
-		goto fail;
-
-	/*
-	 * WaEnableuKernelHeaderValidFix:skl,bxt
-	 * For BXT, this is only upto B0 but below WA is required for later
-	 * steppings also so this is extended as well.
-	 */
-	/* WaEnableGuCBootHashCheckNotSet:skl,bxt */
-	for (retries = 3; ; ) {
-		/*
-		 * Always reset the GuC just before (re)loading, so
-		 * that the state and timing are fairly predictable
-		 */
-		err = guc_hw_reset(dev_priv);
-		if (err)
-			goto fail;
-
-		intel_huc_load(dev_priv);
-		err = guc_ucode_xfer(dev_priv);
-		if (!err)
-			break;
-
-		if (--retries == 0)
-			goto fail;
-
-		DRM_INFO("GuC fw load failed: %d; will reset and "
-			 "retry %d more time(s)\n", err, retries);
-	}
+		intel_uc_fw_status_repr(guc->fw.fetch_status),
+		intel_uc_fw_status_repr(guc->fw.load_status));
 
-	guc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+	ret = guc_ucode_xfer(dev_priv);
 
-	intel_guc_auth_huc(dev_priv);
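+	/* on failure, leave it to the caller to reset the GuC and retry */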
+	if (ret)
+		return -EAGAIN;
 
-	if (i915.enable_guc_submission) {
-		if (i915.guc_log_level >= 0)
-			gen9_enable_guc_interrupts(dev_priv);
-
-		err = i915_guc_submission_enable(dev_priv);
-		if (err)
-			goto fail;
-		guc_interrupts_capture(dev_priv);
-	}
+	guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
 
 	DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
 		 i915.enable_guc_submission ? "submission enabled" : "loaded",
-		 guc_fw->path,
-		 guc_fw->major_ver_found, guc_fw->minor_ver_found);
+		 guc->fw.path,
+		 guc->fw.major_ver_found, guc->fw.minor_ver_found);
 
 	return 0;
-
-fail:
-	if (guc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
-		guc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
-
-	guc_interrupts_release(dev_priv);
-	i915_guc_submission_disable(dev_priv);
-	i915_guc_submission_fini(dev_priv);
-	i915_ggtt_disable_guc(dev_priv);
-
-	/*
-	 * We've failed to load the firmware :(
-	 *
-	 * Decide whether to disable GuC submission and fall back to
-	 * execlist mode, and whether to hide the error by returning
-	 * zero or to return -EIO, which the caller will treat as a
-	 * nonfatal error (i.e. it doesn't prevent driver load, but
-	 * marks the GPU as wedged until reset).
-	 */
-	if (i915.enable_guc_loading > 1) {
-		ret = -EIO;
-	} else if (i915.enable_guc_submission > 1) {
-		ret = -EIO;
-	} else {
-		ret = 0;
-	}
-
-	if (err == 0 && !HAS_GUC_UCODE(dev_priv))
-		;	/* Don't mention the GuC! */
-	else if (err == 0)
-		DRM_INFO("GuC firmware load skipped\n");
-	else if (ret != -EIO)
-		DRM_NOTE("GuC firmware load failed: %d\n", err);
-	else
-		DRM_WARN("GuC firmware load failed: %d\n", err);
-
-	if (i915.enable_guc_submission) {
-		if (fw_path == NULL)
-			DRM_INFO("GuC submission without firmware not supported\n");
-		if (ret == 0)
-			DRM_NOTE("Falling back from GuC submission to execlist mode\n");
-		else
-			DRM_ERROR("GuC init failed: %d\n", ret);
-	}
-	i915.enable_guc_submission = 0;
-
-	return ret;
-}
-
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
-			 struct intel_uc_fw *uc_fw)
-{
-	struct pci_dev *pdev = dev_priv->drm.pdev;
-	struct drm_i915_gem_object *obj;
-	const struct firmware *fw = NULL;
-	struct uc_css_header *css;
-	size_t size;
-	int err;
-
-	DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
-		intel_uc_fw_status_repr(uc_fw->fetch_status));
-
-	err = request_firmware(&fw, uc_fw->path, &pdev->dev);
-	if (err)
-		goto fail;
-	if (!fw)
-		goto fail;
-
-	DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
-		uc_fw->path, fw);
-
-	/* Check the size of the blob before examining buffer contents */
-	if (fw->size < sizeof(struct uc_css_header)) {
-		DRM_NOTE("Firmware header is missing\n");
-		goto fail;
-	}
-
-	css = (struct uc_css_header *)fw->data;
-
-	/* Firmware bits always start from header */
-	uc_fw->header_offset = 0;
-	uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
-		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
-
-	if (uc_fw->header_size != sizeof(struct uc_css_header)) {
-		DRM_NOTE("CSS header definition mismatch\n");
-		goto fail;
-	}
-
-	/* then, uCode */
-	uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
-	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
-	/* now RSA */
-	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
-		DRM_NOTE("RSA key size is bad\n");
-		goto fail;
-	}
-	uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
-	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
-	/* At least, it should have header, uCode and RSA. Size of all three. */
-	size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
-	if (fw->size < size) {
-		DRM_NOTE("Missing firmware components\n");
-		goto fail;
-	}
-
-	/*
-	 * The GuC firmware image has the version number embedded at a well-known
-	 * offset within the firmware blob; note that major / minor version are
-	 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
-	 * in terms of bytes (u8).
-	 */
-	switch (uc_fw->fw) {
-	case INTEL_UC_FW_TYPE_GUC:
-		/* Header and uCode will be loaded to WOPCM. Size of the two. */
-		size = uc_fw->header_size + uc_fw->ucode_size;
-
-		/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
-		if (size > intel_guc_wopcm_size(dev_priv)) {
-			DRM_ERROR("Firmware is too large to fit in WOPCM\n");
-			goto fail;
-		}
-		uc_fw->major_ver_found = css->guc.sw_version >> 16;
-		uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
-		break;
-
-	case INTEL_UC_FW_TYPE_HUC:
-		uc_fw->major_ver_found = css->huc.sw_version >> 16;
-		uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
-		break;
-
-	default:
-		DRM_ERROR("Unknown firmware type %d\n", uc_fw->fw);
-		err = -ENOEXEC;
-		goto fail;
-	}
-
-	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
-	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
-		DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
-			uc_fw->major_ver_found, uc_fw->minor_ver_found,
-			uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-		err = -ENOEXEC;
-		goto fail;
-	}
-
-	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
-			uc_fw->major_ver_found, uc_fw->minor_ver_found,
-			uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	if (IS_ERR_OR_NULL(obj)) {
-		err = obj ? PTR_ERR(obj) : -ENOMEM;
-		goto fail;
-	}
-
-	uc_fw->obj = obj;
-	uc_fw->size = fw->size;
-
-	DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
-			uc_fw->obj);
-
-	release_firmware(fw);
-	uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
-	return;
-
-fail:
-	DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
-		 uc_fw->path, err);
-	DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
-		err, fw, uc_fw->obj);
-
-	obj = fetch_and_zero(&uc_fw->obj);
-	if (obj)
-		i915_gem_object_put(obj);
-
-	release_firmware(fw);		/* OK even if fw is NULL */
-	uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
 }
 
 /**
- * intel_guc_init() - define parameters and fetch firmware
- * @dev_priv:	i915 device private
- *
- * Called early during driver load, but after GEM is initialised.
+ * intel_guc_select_fw() - selects GuC firmware for loading
+ * @guc:	intel_guc struct
  *
- * The firmware will be transferred to the GuC's memory later,
- * when intel_guc_setup() is called.
+ * Return: zero if a firmware was selected, non-zero otherwise
  */
-void intel_guc_init(struct drm_i915_private *dev_priv)
+int intel_guc_select_fw(struct intel_guc *guc)
 {
-	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
-	const char *fw_path;
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-	if (!HAS_GUC(dev_priv)) {
-		i915.enable_guc_loading = 0;
-		i915.enable_guc_submission = 0;
-	} else {
-		/* A negative value means "use platform default" */
-		if (i915.enable_guc_loading < 0)
-			i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
-		if (i915.enable_guc_submission < 0)
-			i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
-	}
+	guc->fw.path = NULL;
+	guc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
+	guc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
+	guc->fw.type = INTEL_UC_FW_TYPE_GUC;
 
-	if (!HAS_GUC_UCODE(dev_priv)) {
-		fw_path = NULL;
+	if (i915.guc_firmware_path) {
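+		/* the module parameter overrides any per-platform default */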
+		guc->fw.path = i915.guc_firmware_path;
+		guc->fw.major_ver_wanted = 0;
+		guc->fw.minor_ver_wanted = 0;
 	} else if (IS_SKYLAKE(dev_priv)) {
-		fw_path = I915_SKL_GUC_UCODE;
-		guc_fw->major_ver_wanted = SKL_FW_MAJOR;
-		guc_fw->minor_ver_wanted = SKL_FW_MINOR;
+		guc->fw.path = I915_SKL_GUC_UCODE;
+		guc->fw.major_ver_wanted = SKL_FW_MAJOR;
+		guc->fw.minor_ver_wanted = SKL_FW_MINOR;
 	} else if (IS_BROXTON(dev_priv)) {
-		fw_path = I915_BXT_GUC_UCODE;
-		guc_fw->major_ver_wanted = BXT_FW_MAJOR;
-		guc_fw->minor_ver_wanted = BXT_FW_MINOR;
+		guc->fw.path = I915_BXT_GUC_UCODE;
+		guc->fw.major_ver_wanted = BXT_FW_MAJOR;
+		guc->fw.minor_ver_wanted = BXT_FW_MINOR;
 	} else if (IS_KABYLAKE(dev_priv)) {
-		fw_path = I915_KBL_GUC_UCODE;
-		guc_fw->major_ver_wanted = KBL_FW_MAJOR;
-		guc_fw->minor_ver_wanted = KBL_FW_MINOR;
+		guc->fw.path = I915_KBL_GUC_UCODE;
+		guc->fw.major_ver_wanted = KBL_FW_MAJOR;
+		guc->fw.minor_ver_wanted = KBL_FW_MINOR;
 	} else {
-		fw_path = "";	/* unknown device */
+		DRM_ERROR("No GuC firmware known for platform with GuC!\n");
+		return -ENOENT;
 	}
 
-	guc_fw->path = fw_path;
-	guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-	guc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
-
-	/* Early (and silent) return if GuC loading is disabled */
-	if (!i915.enable_guc_loading)
-		return;
-	if (fw_path == NULL)
-		return;
-	if (*fw_path == '\0')
-		return;
-
-	guc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
-	intel_uc_fw_fetch(dev_priv, guc_fw);
-	/* status must now be FAIL or SUCCESS */
+	return 0;
 }
 
 /**
@@ -793,7 +441,6 @@ void intel_guc_fini(struct drm_i915_private *dev_priv)
 	struct drm_i915_gem_object *obj;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	guc_interrupts_release(dev_priv);
 	i915_guc_submission_disable(dev_priv);
 	i915_guc_submission_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c2184f755ec6..3eec74ca5116 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1297,16 +1297,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc_state->base.crtc->dev;
+	struct drm_i915_private *dev_priv =
+		to_i915(crtc_state->base.crtc->dev);
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_connector_state *connector_state;
+	struct drm_connector *connector;
+	int i;
 
-	if (HAS_GMCH_DISPLAY(to_i915(dev)))
+	if (HAS_GMCH_DISPLAY(dev_priv))
 		return false;
 
 	/*
 	 * HDMI 12bpc affects the clocks, so it's only possible
 	 * when not cloning with other encoder types.
 	 */
-	return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
+	if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
+		return false;
+
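+	/* every sink cloned on this crtc must support 36-bit (12bpc) deep color */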
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		const struct drm_display_info *info = &connector->display_info;
+
+		if (connector_state->crtc != crtc_state->base.crtc)
+			continue;
+
+		if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
+			return false;
+	}
+
+	return true;
 }
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index ba763e7d7dcf..7d210097eefa 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -150,16 +150,17 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
 static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_connector *intel_connector;
 	struct intel_encoder *intel_encoder;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	enum hpd_pin pin;
 	bool hpd_disabled = false;
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		if (connector->polled != DRM_CONNECTOR_POLL_HPD)
 			continue;
 
@@ -182,6 +183,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 			| DRM_CONNECTOR_POLL_DISCONNECT;
 		hpd_disabled = true;
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	/* Enable polling and queue hotplug re-enabling. */
 	if (hpd_disabled) {
@@ -197,7 +199,6 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 		container_of(work, typeof(*dev_priv),
 			     hotplug.reenable_work.work);
 	struct drm_device *dev = &dev_priv->drm;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	int i;
 
 	intel_runtime_pm_get(dev_priv);
@@ -205,13 +206,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 	spin_lock_irq(&dev_priv->irq_lock);
 	for_each_hpd_pin(i) {
 		struct drm_connector *connector;
+		struct drm_connector_list_iter conn_iter;
 
 		if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
 			continue;
 
 		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
 
-		list_for_each_entry(connector, &mode_config->connector_list, head) {
+		drm_connector_list_iter_begin(dev, &conn_iter);
+		drm_for_each_connector_iter(connector, &conn_iter) {
 			struct intel_connector *intel_connector = to_intel_connector(connector);
 
 			if (intel_connector->encoder->hpd_pin == i) {
@@ -223,6 +226,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 					connector->polled = DRM_CONNECTOR_POLL_HPD;
 			}
 		}
+		drm_connector_list_iter_end(&conn_iter);
 	}
 	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
 		dev_priv->display.hpd_irq_setup(dev_priv);
@@ -308,14 +312,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	struct drm_i915_private *dev_priv =
 		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
 	struct drm_device *dev = &dev_priv->drm;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_connector *intel_connector;
 	struct intel_encoder *intel_encoder;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	bool changed = false;
 	u32 hpd_event_bits;
 
-	mutex_lock(&mode_config->mutex);
+	mutex_lock(&dev->mode_config.mutex);
 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
 	spin_lock_irq(&dev_priv->irq_lock);
@@ -328,7 +332,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		intel_connector = to_intel_connector(connector);
 		if (!intel_connector->encoder)
 			continue;
@@ -342,7 +347,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 				changed = true;
 		}
 	}
-	mutex_unlock(&mode_config->mutex);
+	drm_connector_list_iter_end(&conn_iter);
+	mutex_unlock(&dev->mode_config.mutex);
 
 	if (changed)
 		drm_kms_helper_hotplug_event(dev);
@@ -490,15 +496,16 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
 		container_of(work, struct drm_i915_private,
 			     hotplug.poll_init_work);
 	struct drm_device *dev = &dev_priv->drm;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	bool enabled;
 
 	mutex_lock(&dev->mode_config.mutex);
 
 	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
 
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		struct intel_connector *intel_connector =
 			to_intel_connector(connector);
 		connector->polled = intel_connector->polled;
@@ -516,6 +523,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
 				DRM_CONNECTOR_POLL_HPD;
 		}
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	if (enabled)
 		drm_kms_helper_poll_enable(dev);
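
All four connector walks in this file now follow the same begin/iter/end shape; as a self-contained sketch of the pattern (function name ours):

	static void walk_connectors(struct drm_device *dev)
	{
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			/* inspect or update connector state here */
		}
		drm_connector_list_iter_end(&conn_iter); /* must always be paired */
	}

Unlike the old open-coded list_for_each_entry() over mode_config.connector_list, the iterator handles its own locking and referencing, so walks no longer depend on the caller holding mode_config.mutex.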
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index e660109fc51e..7af900bcdc05 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -141,60 +141,43 @@ static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
 }
 
 /**
- * intel_huc_init() - initiate HuC firmware loading request
- * @dev_priv: the drm_i915_private device
- *
- * Called early during driver load, but after GEM is initialised. The loading
- * will continue only when driver explicitly specify firmware name and version.
- * All other cases are considered as INTEL_UC_FIRMWARE_NONE either because HW
- * is not capable or driver yet support it. And there will be no error message
- * for INTEL_UC_FIRMWARE_NONE cases.
- *
- * The DMA-copying to HW is done later when intel_huc_load() is called.
+ * intel_huc_select_fw() - selects HuC firmware for loading
+ * @huc:	intel_huc struct
  */
-void intel_huc_init(struct drm_i915_private *dev_priv)
+void intel_huc_select_fw(struct intel_huc *huc)
 {
-	struct intel_huc *huc = &dev_priv->huc;
-	struct intel_uc_fw *huc_fw = &huc->fw;
-	const char *fw_path = NULL;
-
-	huc_fw->path = NULL;
-	huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-	huc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
-	huc_fw->fw = INTEL_UC_FW_TYPE_HUC;
-
-	if (!HAS_HUC_UCODE(dev_priv))
-		return;
-
-	if (IS_SKYLAKE(dev_priv)) {
-		fw_path = I915_SKL_HUC_UCODE;
-		huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
-		huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
+	struct drm_i915_private *dev_priv = huc_to_i915(huc);
+
+	huc->fw.path = NULL;
+	huc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
+	huc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
+	huc->fw.type = INTEL_UC_FW_TYPE_HUC;
+
+	if (i915.huc_firmware_path) {
+		huc->fw.path = i915.huc_firmware_path;
+		huc->fw.major_ver_wanted = 0;
+		huc->fw.minor_ver_wanted = 0;
+	} else if (IS_SKYLAKE(dev_priv)) {
+		huc->fw.path = I915_SKL_HUC_UCODE;
+		huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
+		huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
 	} else if (IS_BROXTON(dev_priv)) {
-		fw_path = I915_BXT_HUC_UCODE;
-		huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
-		huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
+		huc->fw.path = I915_BXT_HUC_UCODE;
+		huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
+		huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
 	} else if (IS_KABYLAKE(dev_priv)) {
-		fw_path = I915_KBL_HUC_UCODE;
-		huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
-		huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
-	}
-
-	huc_fw->path = fw_path;
-
-	if (huc_fw->path == NULL)
+		huc->fw.path = I915_KBL_HUC_UCODE;
+		huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
+		huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+	} else {
+		DRM_ERROR("No HuC firmware known for platform with HuC!\n");
 		return;
-
-	huc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
-	DRM_DEBUG_DRIVER("HuC firmware pending, path %s\n", fw_path);
-
-	intel_uc_fw_fetch(dev_priv, huc_fw);
+	}
 }
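
The reworked functions take a struct intel_huc * and recover the device via huc_to_i915(). That helper is defined elsewhere in the driver; its shape is presumably the usual container_of() conversion, roughly:

	static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
	{
		return container_of(huc, struct drm_i915_private, huc);
	}

Note also that a user-supplied i915.huc_firmware_path sets major/minor_ver_wanted to 0, which intel_uc_prepare_fw() (in intel_uc.c below) interprets as "skip the version check".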
 
 /**
- * intel_huc_load() - load HuC uCode to device
- * @dev_priv: the drm_i915_private device
+ * intel_huc_init_hw() - load HuC uCode to device
+ * @huc: intel_huc structure
  *
  * Called from guc_setup() during driver loading and also after a GPU reset.
 * Note that HuC loading must be done before GuC loading.
@@ -205,26 +188,26 @@ void intel_huc_init(struct drm_i915_private *dev_priv)
  *
  * Return:	non-zero code on error
  */
-int intel_huc_load(struct drm_i915_private *dev_priv)
+int intel_huc_init_hw(struct intel_huc *huc)
 {
-	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+	struct drm_i915_private *dev_priv = huc_to_i915(huc);
 	int err;
 
-	if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_NONE)
+	if (huc->fw.fetch_status == INTEL_UC_FIRMWARE_NONE)
 		return 0;
 
 	DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
-		huc_fw->path,
-		intel_uc_fw_status_repr(huc_fw->fetch_status),
-		intel_uc_fw_status_repr(huc_fw->load_status));
+		huc->fw.path,
+		intel_uc_fw_status_repr(huc->fw.fetch_status),
+		intel_uc_fw_status_repr(huc->fw.load_status));
 
-	if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
-	    huc_fw->load_status == INTEL_UC_FIRMWARE_FAIL)
+	if (huc->fw.fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
+	    huc->fw.load_status == INTEL_UC_FIRMWARE_FAIL)
 		return -ENOEXEC;
 
-	huc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+	huc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
 
-	switch (huc_fw->fetch_status) {
+	switch (huc->fw.fetch_status) {
 	case INTEL_UC_FIRMWARE_FAIL:
 		/* something went wrong :( */
 		err = -EIO;
@@ -235,9 +218,9 @@ int intel_huc_load(struct drm_i915_private *dev_priv)
 	default:
 		/* "can't happen" */
 		WARN_ONCE(1, "HuC fw %s invalid fetch_status %s [%d]\n",
-			huc_fw->path,
-			intel_uc_fw_status_repr(huc_fw->fetch_status),
-			huc_fw->fetch_status);
+			huc->fw.path,
+			intel_uc_fw_status_repr(huc->fw.fetch_status),
+			huc->fw.fetch_status);
 		err = -ENXIO;
 		goto fail;
 
@@ -249,18 +232,18 @@ int intel_huc_load(struct drm_i915_private *dev_priv)
 	if (err)
 		goto fail;
 
-	huc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+	huc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
 
 	DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
-		huc_fw->path,
-		intel_uc_fw_status_repr(huc_fw->fetch_status),
-		intel_uc_fw_status_repr(huc_fw->load_status));
+		huc->fw.path,
+		intel_uc_fw_status_repr(huc->fw.fetch_status),
+		intel_uc_fw_status_repr(huc->fw.load_status));
 
 	return 0;
 
 fail:
-	if (huc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
-		huc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
+	if (huc->fw.load_status == INTEL_UC_FIRMWARE_PENDING)
+		huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
 
 	DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 89f38e7def9f..77168e673e0a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -306,7 +306,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
 		return;
 
-	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+				   status, rq);
 }
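
With the notifier head moved from the context to the engine, a listener (GVT's scheduler is the in-tree user) now subscribes per engine. A hedged sketch of the consumer side, assuming <linux/notifier.h>:

	static int status_cb(struct notifier_block *nb, unsigned long action,
			     void *data)
	{
		/* data is the drm_i915_gem_request being scheduled in/out */
		return NOTIFY_OK;
	}

	static struct notifier_block status_nb = { .notifier_call = status_cb };

	/* once per engine, e.g. at scheduler init: */
	atomic_notifier_chain_register(&engine->context_status_notifier,
				       &status_nb);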
 
 static void
@@ -402,6 +403,18 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	bool submit = false;
 
+	/* After execlist_first is updated, the tasklet will be rescheduled.
+	 *
+	 * If we are currently running (inside the tasklet) and a third
+	 * party queues a request and so updates engine->execlist_first under
+	 * the spinlock (which we have elided), it will atomically set the
+	 * TASKLET_SCHED flag, causing us to be re-executed and pick up
+	 * the change in state (the update to TASKLET_SCHED incurs a memory
+	 * barrier making this cross-cpu checking safe).
+	 */
+	if (!READ_ONCE(engine->execlist_first))
+		return;
+
 	last = port->request;
 	if (last)
 		/* WaIdleLiteRestore:bdw,skl
@@ -741,6 +754,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
 
 	if (ce->pin_count++)
 		return 0;
+	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
 	if (!ce->state) {
 		ret = execlists_context_deferred_alloc(ctx, engine);
@@ -1159,7 +1173,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 	/* After a GPU reset, we may have requests to replay */
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-	if (!execlists_elsp_idle(engine)) {
+	if (!i915.enable_guc_submission && !execlists_elsp_idle(engine)) {
 		DRM_DEBUG_DRIVER("Restarting %s from requests [0x%x, 0x%x]\n",
 				 engine->name,
 				 port_seqno(&engine->execlist_port[0]),
@@ -1244,9 +1258,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	request->ring->last_retired_head = -1;
 	intel_ring_update_space(request->ring);
 
-	if (i915.enable_guc_submission)
-		return;
-
 	/* Catch up with any missed context-switch interrupts */
 	if (request->ctx != port[0].request->ctx) {
 		i915_gem_request_put(port[0].request);
@@ -1560,15 +1571,10 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	kfree(engine);
 }
 
-void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, dev_priv, id) {
-		engine->submit_request = execlists_submit_request;
-		engine->schedule = execlists_schedule;
-	}
+	engine->submit_request = execlists_submit_request;
+	engine->schedule = execlists_schedule;
 }
 
 static void
@@ -1586,8 +1592,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->emit_flush = gen8_emit_flush;
 	engine->emit_breadcrumb = gen8_emit_breadcrumb;
 	engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
-	engine->submit_request = execlists_submit_request;
-	engine->schedule = execlists_schedule;
+
+	engine->set_default_submission = execlists_set_default_submission;
 
 	engine->irq_enable = gen8_logical_ring_enable_irq;
 	engine->irq_disable = gen8_logical_ring_disable_irq;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 5fc07761caff..e8015e7bf4e9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -87,6 +87,5 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
-void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4a862a358c70..441c01466384 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -434,6 +434,7 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
 static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 {
 	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 	struct drm_device *dev = &dev_priv->drm;
 
@@ -458,8 +459,10 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 	 * only one).
 	 */
 	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
-	for_each_intel_connector(dev, connector)
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter)
 		intel_panel_set_backlight_acpi(connector, bclp, 255);
+	drm_connector_list_iter_end(&conn_iter);
 	asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
 
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -701,6 +704,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 {
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	int i = 0, max_outputs;
 	int display_index[16] = {};
 
@@ -714,7 +718,8 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 	max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
 		ARRAY_SIZE(opregion->acpi->did2);
 
-	for_each_intel_connector(&dev_priv->drm, connector) {
+	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
 		u32 device_id, type;
 
 		device_id = acpi_display_type(connector);
@@ -729,6 +734,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 			set_did(opregion, i, device_id);
 		i++;
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	DRM_DEBUG_KMS("%d outputs detected\n", i);
 
@@ -745,6 +751,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
 {
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	int i = 0;
 
 	/*
@@ -757,11 +764,13 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
 	 * Note that internal panels should be at the front of the connector
 	 * list already, ensuring they're not left out.
 	 */
-	for_each_intel_connector(&dev_priv->drm, connector) {
+	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
 		if (i >= ARRAY_SIZE(opregion->acpi->cadl))
 			break;
 		opregion->acpi->cadl[i++] = connector->acpi_device_id;
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	/* If fewer than 8 active devices, the list must be null terminated */
 	if (i < ARRAY_SIZE(opregion->acpi->cadl))
@@ -1061,16 +1070,5 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 		return -ENODEV;
 	}
 
-	/*
-	 * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
-	 * low vswing for eDP, whereas the VBT panel type (2) gives us normal
-	 * vswing instead. Low vswing results in some display flickers, so
-	 * let's simply ignore the OpRegion panel type on SKL for now.
-	 */
-	if (IS_SKYLAKE(dev_priv)) {
-		DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
-		return -ENODEV;
-	}
-
 	return ret - 1;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 5ef9f5bfb92c..2e0c56ed22bb 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -278,7 +278,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 4);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -343,7 +343,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
 	cs = intel_ring_begin(req, 2);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -419,7 +419,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 6);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -477,7 +477,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
 		cs = intel_ring_begin(req, 2);
 		if (IS_ERR(cs)) {
-			i915_add_request_no_flush(req);
+			i915_add_request(req);
 			return PTR_ERR(cs);
 		}
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 99e09f63d4b3..aece0ff88a5d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1358,13 +1358,22 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 
 	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
 
-	spin_lock(&dev_priv->wm.dsparb_lock);
+	/*
+	 * uncore.lock serves a double purpose here. It allows us to
+	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
+	 * it protects the DSPARB registers from getting clobbered by
+	 * parallel updates from multiple pipes.
+	 *
+	 * intel_pipe_update_start() has already disabled interrupts
+	 * for us, so a plain spin_lock() is sufficient here.
+	 */
+	spin_lock(&dev_priv->uncore.lock);
 
 	switch (crtc->pipe) {
 		uint32_t dsparb, dsparb2, dsparb3;
 	case PIPE_A:
-		dsparb = I915_READ(DSPARB);
-		dsparb2 = I915_READ(DSPARB2);
+		dsparb = I915_READ_FW(DSPARB);
+		dsparb2 = I915_READ_FW(DSPARB2);
 
 		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
 			    VLV_FIFO(SPRITEB, 0xff));
@@ -1376,12 +1385,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
 			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
 
-		I915_WRITE(DSPARB, dsparb);
-		I915_WRITE(DSPARB2, dsparb2);
+		I915_WRITE_FW(DSPARB, dsparb);
+		I915_WRITE_FW(DSPARB2, dsparb2);
 		break;
 	case PIPE_B:
-		dsparb = I915_READ(DSPARB);
-		dsparb2 = I915_READ(DSPARB2);
+		dsparb = I915_READ_FW(DSPARB);
+		dsparb2 = I915_READ_FW(DSPARB2);
 
 		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
 			    VLV_FIFO(SPRITED, 0xff));
@@ -1393,12 +1402,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
 			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
 
-		I915_WRITE(DSPARB, dsparb);
-		I915_WRITE(DSPARB2, dsparb2);
+		I915_WRITE_FW(DSPARB, dsparb);
+		I915_WRITE_FW(DSPARB2, dsparb2);
 		break;
 	case PIPE_C:
-		dsparb3 = I915_READ(DSPARB3);
-		dsparb2 = I915_READ(DSPARB2);
+		dsparb3 = I915_READ_FW(DSPARB3);
+		dsparb2 = I915_READ_FW(DSPARB2);
 
 		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
 			     VLV_FIFO(SPRITEF, 0xff));
@@ -1410,16 +1419,16 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
 			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
 
-		I915_WRITE(DSPARB3, dsparb3);
-		I915_WRITE(DSPARB2, dsparb2);
+		I915_WRITE_FW(DSPARB3, dsparb3);
+		I915_WRITE_FW(DSPARB2, dsparb2);
 		break;
 	default:
 		break;
 	}
 
-	POSTING_READ(DSPARB);
+	POSTING_READ_FW(DSPARB);
 
-	spin_unlock(&dev_priv->wm.dsparb_lock);
+	spin_unlock(&dev_priv->uncore.lock);
 }
 
 #undef VLV_FIFO
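
The switch above is the general recipe this series applies to multi-register updates: take uncore.lock once, use the unchecked I915_{READ,WRITE}_FW() accessors inside it, and post a read before unlocking. Stripped to its skeleton (FIELD_MASK and new_value are placeholders):

	spin_lock(&dev_priv->uncore.lock);	/* irqs already disabled here */

	dsparb = I915_READ_FW(DSPARB);
	dsparb &= ~FIELD_MASK;			/* placeholder bits */
	dsparb |= new_value;
	I915_WRITE_FW(DSPARB, dsparb);

	POSTING_READ_FW(DSPARB);		/* flush writes before unlocking */
	spin_unlock(&dev_priv->uncore.lock);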
@@ -4120,7 +4129,7 @@ pipes_modified(struct drm_atomic_state *state)
 	struct drm_crtc_state *cstate;
 	uint32_t i, ret = 0;
 
-	for_each_crtc_in_state(state, crtc, cstate, i)
+	for_each_new_crtc_in_state(state, crtc, cstate, i)
 		ret |= drm_crtc_mask(crtc);
 
 	return ret;
@@ -4263,7 +4272,7 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
 	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
 	int i;
 
-	for_each_crtc_in_state(state, crtc, cstate, i) {
+	for_each_new_crtc_in_state(state, crtc, cstate, i) {
 		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		enum pipe pipe = intel_crtc->pipe;
 
@@ -4305,7 +4314,7 @@ skl_compute_wm(struct drm_atomic_state *state)
 	 * since any racing commits that want to update them would need to
 	 * hold _all_ CRTC state mutexes.
 	 */
-	for_each_crtc_in_state(state, crtc, cstate, i)
+	for_each_new_crtc_in_state(state, crtc, cstate, i)
 		changed = true;
 	if (!changed)
 		return 0;
@@ -4327,7 +4336,7 @@ skl_compute_wm(struct drm_atomic_state *state)
 	 * should allow skl_update_pipe_wm() to return failure in cases where
 	 * no suitable watermark values can be found.
 	 */
-	for_each_crtc_in_state(state, crtc, cstate, i) {
+	for_each_new_crtc_in_state(state, crtc, cstate, i) {
 		struct intel_crtc_state *intel_cstate =
 			to_intel_crtc_state(cstate);
 		const struct skl_pipe_wm *old_pipe_wm =
@@ -5166,8 +5175,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
 	u32 mask = 0;
 
+	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
 	if (val > dev_priv->rps.min_freq_softlimit)
-		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
 	if (val < dev_priv->rps.max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
@@ -5277,7 +5287,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 	if (dev_priv->rps.enabled) {
 		u8 freq;
 
-		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
 			gen6_rps_reset_ei(dev_priv);
 		I915_WRITE(GEN6_PMINTRMSK,
 			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -6382,7 +6392,8 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 
 	/* allows RC6 residency counter to work */
 	I915_WRITE(VLV_COUNTER_CONTROL,
-		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+				      VLV_MEDIA_RC0_COUNT_EN |
 				      VLV_RENDER_RC0_COUNT_EN |
 				      VLV_MEDIA_RC6_COUNT_EN |
 				      VLV_RENDER_RC6_COUNT_EN));
@@ -7075,7 +7086,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
 		rcs->init_context(req);
 
 	/* Mark the device busy, calling intel_enable_gt_powersave() */
-	i915_add_request_no_flush(req);
+	i915_add_request(req);
 
 unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -8339,3 +8350,79 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
 	dev_priv->pm.suspended = false;
 	atomic_set(&dev_priv->pm.wakeref_count, 0);
 }
+
+static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
+			     const i915_reg_t reg)
+{
+	u32 lower, upper, tmp;
+
+	/* The registers accessed do not need forcewake. We borrow
+	 * uncore.lock to prevent concurrent access to the range selector.
+	 */
+	spin_lock_irq(&dev_priv->uncore.lock);
+
+	/* vlv and chv residency counters are 40 bits in width.
+	 * With a control bit, we can choose between upper or lower
+	 * 32bit window into this counter.
+	 *
+	 * Although we always use the counter in high-range mode elsewhere,
+	 * userspace may attempt to read the value before rc6 is initialised,
+	 * before we have set the default VLV_COUNTER_CONTROL value. So always
+	 * set the high bit to be safe.
+	 */
+	I915_WRITE_FW(VLV_COUNTER_CONTROL,
+		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+	upper = I915_READ_FW(reg);
+	do {
+		tmp = upper;
+
+		I915_WRITE_FW(VLV_COUNTER_CONTROL,
+			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+		lower = I915_READ_FW(reg);
+
+		I915_WRITE_FW(VLV_COUNTER_CONTROL,
+			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+		upper = I915_READ_FW(reg);
+	} while (upper != tmp);
+
+	/* Everywhere else we always use VLV_COUNTER_CONTROL with the
+	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
+	 * now.
+	 */
+
+	spin_unlock_irq(&dev_priv->uncore.lock);
+
+	return lower | (u64)upper << 8;
+}
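
To see why the << 8 is right: the high window reports bits 39:8 of the 40-bit counter and the low window bits 31:0, so the two reads overlap in bits 31:8. For a counter value of 0x123456789a (our example), the high window reads 0x12345678, the low window reads 0x3456789a, and

	0x3456789aULL | (0x12345678ULL << 8) == 0x123456789aULL

reconstructs the full value; the retry loop guarantees both reads came from a counter whose upper half did not move in between.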
+
+u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+			   const i915_reg_t reg)
+{
+	u64 time_hw, units, div;
+
+	if (!intel_enable_rc6())
+		return 0;
+
+	intel_runtime_pm_get(dev_priv);
+
+	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		units = 1000;
+		div = dev_priv->czclk_freq;
+
+		time_hw = vlv_residency_raw(dev_priv, reg);
+	} else if (IS_GEN9_LP(dev_priv)) {
+		units = 1000;
+		div = 1200;		/* 833.33ns */
+
+		time_hw = I915_READ(reg);
+	} else {
+		units = 128000; /* 1.28us */
+		div = 100000;
+
+		time_hw = I915_READ(reg);
+	}
+
+	intel_runtime_pm_put(dev_priv);
+	return DIV_ROUND_UP_ULL(time_hw * units, div);
+}
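
A worked example of the final conversion (our numbers): outside VLV/CHV and GEN9-LP the counter ticks every 1.28 us, encoded as units = 128000 and div = 100000. A raw reading of 781250 ticks therefore yields DIV_ROUND_UP_ULL(781250 * 128000, 100000) = 1000000 us, i.e. exactly one second of RC6 residency. The GEN9-LP pair units = 1000, div = 1200 likewise encodes the 833.33 ns tick the comment mentions (1000/1200 us per tick).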
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4a864f8c9387..d9b8d17c3fc6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1445,6 +1445,7 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
 
 	if (ce->pin_count++)
 		return 0;
+	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
 	if (ce->state) {
 		ret = context_pin(ctx);
@@ -2050,6 +2051,16 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 	}
 }
 
+static void i9xx_set_default_submission(struct intel_engine_cs *engine)
+{
+	engine->submit_request = i9xx_submit_request;
+}
+
+static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
+{
+	engine->submit_request = gen6_bsd_submit_request;
+}
+
 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				      struct intel_engine_cs *engine)
 {
@@ -2080,7 +2091,8 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				engine->emit_breadcrumb_sz++;
 		}
 	}
-	engine->submit_request = i9xx_submit_request;
+
+	engine->set_default_submission = i9xx_set_default_submission;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		engine->emit_bb_start = gen8_emit_bb_start;
@@ -2165,7 +2177,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) >= 6) {
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
-			engine->submit_request = gen6_bsd_submit_request;
+			engine->set_default_submission = gen6_bsd_set_default_submission;
 		engine->emit_flush = gen6_bsd_ring_flush;
 		if (INTEL_GEN(dev_priv) < 8)
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0ef491df5b4e..847aea554464 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -273,6 +273,8 @@ struct intel_engine_cs {
 	void		(*reset_hw)(struct intel_engine_cs *engine,
 				    struct drm_i915_gem_request *req);
 
+	void		(*set_default_submission)(struct intel_engine_cs *engine);
+
 	int		(*context_pin)(struct intel_engine_cs *engine,
 				       struct i915_gem_context *ctx);
 	void		(*context_unpin)(struct intel_engine_cs *engine,
@@ -408,6 +410,9 @@ struct intel_engine_cs {
 	 */
 	struct i915_gem_context *legacy_active_context;
 
+	/* status_notifier: list of callbacks for context-switch changes */
+	struct atomic_notifier_head context_status_notifier;
+
 	struct intel_engine_hangcheck hangcheck;
 
 	bool needs_cmd_parser;
@@ -462,7 +467,11 @@ static inline void
 intel_write_status_page(struct intel_engine_cs *engine,
 			int reg, u32 value)
 {
+	mb();
+	clflush(&engine->status_page.page_addr[reg]);
 	engine->status_page.page_addr[reg] = value;
+	clflush(&engine->status_page.page_addr[reg]);
+	mb();
 }
 
 /*
@@ -669,4 +678,6 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
+void intel_engines_reset_default_submission(struct drm_i915_private *i915);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
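
intel_engines_reset_default_submission() is declared here but defined in intel_engine_cs.c; given the new hook, its body is presumably just a loop over the engines (a sketch, not the verified implementation):

	void intel_engines_reset_default_submission(struct drm_i915_private *i915)
	{
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id)
			engine->set_default_submission(engine);
	}

This is what lets GuC submission override submit_request/schedule at runtime and lets a GPU reset restore the execlists defaults without special-casing each backend.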
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 27e0752d1578..b931d0bd7a64 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -65,6 +65,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			    1000 * adjusted_mode->crtc_htotal);
 }
 
+#define VBLANK_EVASION_TIME_US 100
+
 /**
  * intel_pipe_update_start() - start update of a set of display registers
  * @crtc: the crtc of which the registers are going to be updated
@@ -92,7 +94,8 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 		vblank_start = DIV_ROUND_UP(vblank_start, 2);
 
 	/* FIXME needs to be calibrated sensibly */
-	min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
+	min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+						      VBLANK_EVASION_TIME_US);
 	max = vblank_start - 1;
 
 	local_irq_disable();
@@ -158,6 +161,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
 	int scanline_end = intel_get_crtc_scanline(crtc);
 	u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
 	ktime_t end_vbl_time = ktime_get();
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	if (work) {
 		work->flip_queued_vblank = end_vbl_count;
@@ -183,6 +187,9 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
 
 	local_irq_enable();
 
+	if (intel_vgpu_active(dev_priv))
+		return;
+
 	if (crtc->debug.start_vbl_count &&
 	    crtc->debug.start_vbl_count != end_vbl_count) {
 		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
@@ -191,7 +198,12 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
 			  ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
 			  crtc->debug.min_vbl, crtc->debug.max_vbl,
 			  crtc->debug.scanline_start, scanline_end);
-	}
+	} else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
+		   VBLANK_EVASION_TIME_US)
+		DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+			 pipe_name(pipe),
+			 ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
+			 VBLANK_EVASION_TIME_US);
 }
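
For scale, intel_usecs_to_scanlines() above computes scanlines = DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock, 1000 * adjusted_mode->crtc_htotal). With a 148.5 MHz pixel clock (crtc_clock == 148500, in kHz) and crtc_htotal == 2200 (our example timings), the 100 us VBLANK_EVASION_TIME_US window becomes DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7 scanlines before vblank_start.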
 
 static void
@@ -218,15 +230,11 @@ skl_update_plane(struct drm_plane *drm_plane,
 	uint32_t y = plane_state->main.y;
 	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	unsigned long irqflags;
 
 	plane_ctl = PLANE_CTL_ENABLE;
 
-	if (IS_GEMINILAKE(dev_priv)) {
-		I915_WRITE(PLANE_COLOR_CTL(pipe, plane_id),
-			   PLANE_COLOR_PIPE_GAMMA_ENABLE |
-			   PLANE_COLOR_PIPE_CSC_ENABLE |
-			   PLANE_COLOR_PLANE_GAMMA_DISABLE);
-	} else {
+	if (!IS_GEMINILAKE(dev_priv)) {
 		plane_ctl |=
 			PLANE_CTL_PIPE_GAMMA_ENABLE |
 			PLANE_CTL_PIPE_CSC_ENABLE |
@@ -237,12 +245,6 @@ skl_update_plane(struct drm_plane *drm_plane,
 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-	if (key->flags) {
-		I915_WRITE(PLANE_KEYVAL(pipe, plane_id), key->min_value);
-		I915_WRITE(PLANE_KEYMAX(pipe, plane_id), key->max_value);
-		I915_WRITE(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
-	}
-
 	if (key->flags & I915_SET_COLORKEY_DESTINATION)
 		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
 	else if (key->flags & I915_SET_COLORKEY_SOURCE)
@@ -254,36 +256,50 @@ skl_update_plane(struct drm_plane *drm_plane,
 	crtc_w--;
 	crtc_h--;
 
-	I915_WRITE(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
-	I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
-	I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	if (IS_GEMINILAKE(dev_priv)) {
+		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+			      PLANE_COLOR_PIPE_GAMMA_ENABLE |
+			      PLANE_COLOR_PIPE_CSC_ENABLE |
+			      PLANE_COLOR_PLANE_GAMMA_DISABLE);
+	}
+
+	if (key->flags) {
+		I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+		I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
+		I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
+	}
+
+	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
 
 	/* program plane scaler */
 	if (plane_state->scaler_id >= 0) {
 		int scaler_id = plane_state->scaler_id;
 		const struct intel_scaler *scaler;
 
-		DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
-			      plane_id, PS_PLANE_SEL(plane_id));
-
 		scaler = &crtc_state->scaler_state.scalers[scaler_id];
 
-		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
-			   PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
-		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
-		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
-		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
-			((crtc_w + 1) << 16)|(crtc_h + 1));
+		I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
+			      PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
+		I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+		I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+		I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
+			      ((crtc_w + 1) << 16)|(crtc_h + 1));
 
-		I915_WRITE(PLANE_POS(pipe, plane_id), 0);
+		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
 	} else {
-		I915_WRITE(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+		I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
 	}
 
-	I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
-	I915_WRITE(PLANE_SURF(pipe, plane_id),
-		   intel_plane_ggtt_offset(plane_state) + surf_addr);
-	POSTING_READ(PLANE_SURF(pipe, plane_id));
+	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
+	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
+		      intel_plane_ggtt_offset(plane_state) + surf_addr);
+	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
@@ -294,11 +310,16 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
 	enum plane_id plane_id = intel_plane->id;
 	enum pipe pipe = intel_plane->pipe;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
 
-	I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
+	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
 
-	I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
-	POSTING_READ(PLANE_SURF(pipe, plane_id));
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
@@ -321,23 +342,23 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
 	 * Cb and Cr apparently come in as signed already, so no
 	 * need for any offset. For Y we need to remove the offset.
 	 */
-	I915_WRITE(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
-	I915_WRITE(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-	I915_WRITE(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-
-	I915_WRITE(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
-	I915_WRITE(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
-	I915_WRITE(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
-	I915_WRITE(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
-	I915_WRITE(SPCSCC8(plane_id), SPCSC_C0(8263));
-
-	I915_WRITE(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
-	I915_WRITE(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
-	I915_WRITE(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
-
-	I915_WRITE(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
-	I915_WRITE(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
-	I915_WRITE(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+	I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+	I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+	I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+	I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
+	I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
+	I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
+	I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
+	I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(8263));
+
+	I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+	I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+	I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+	I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+	I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+	I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
 }
 
 static void
@@ -363,6 +384,7 @@ vlv_update_plane(struct drm_plane *dplane,
 	uint32_t y = plane_state->base.src.y1 >> 16;
 	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	unsigned long irqflags;
 
 	sprctl = SP_ENABLE;
 
@@ -424,6 +446,9 @@ vlv_update_plane(struct drm_plane *dplane,
 	if (rotation & DRM_REFLECT_X)
 		sprctl |= SP_MIRROR;
 
+	if (key->flags & I915_SET_COLORKEY_SOURCE)
+		sprctl |= SP_SOURCE_KEY;
+
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -442,33 +467,33 @@ vlv_update_plane(struct drm_plane *dplane,
 
 	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
 
-	if (key->flags) {
-		I915_WRITE(SPKEYMINVAL(pipe, plane_id), key->min_value);
-		I915_WRITE(SPKEYMAXVAL(pipe, plane_id), key->max_value);
-		I915_WRITE(SPKEYMSK(pipe, plane_id), key->channel_mask);
-	}
-
-	if (key->flags & I915_SET_COLORKEY_SOURCE)
-		sprctl |= SP_SOURCE_KEY;
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
 		chv_update_csc(intel_plane, fb->format->format);
 
-	I915_WRITE(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
-	I915_WRITE(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+	if (key->flags) {
+		I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
+		I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+		I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+	}
+	I915_WRITE_FW(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
+	I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
 
 	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+		I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
 	else
-		I915_WRITE(SPLINOFF(pipe, plane_id), linear_offset);
+		I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+
+	I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
 
-	I915_WRITE(SPCONSTALPHA(pipe, plane_id), 0);
+	I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+	I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
+	I915_WRITE_FW(SPSURF(pipe, plane_id),
+		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+	POSTING_READ_FW(SPSURF(pipe, plane_id));
 
-	I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
-	I915_WRITE(SPCNTR(pipe, plane_id), sprctl);
-	I915_WRITE(SPSURF(pipe, plane_id),
-		   intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-	POSTING_READ(SPSURF(pipe, plane_id));
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
@@ -479,11 +504,16 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
 	enum pipe pipe = intel_plane->pipe;
 	enum plane_id plane_id = intel_plane->id;
+	unsigned long irqflags;
 
-	I915_WRITE(SPCNTR(pipe, plane_id), 0);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE(SPSURF(pipe, plane_id), 0);
-	POSTING_READ(SPSURF(pipe, plane_id));
+	I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
+
+	I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
+	POSTING_READ_FW(SPSURF(pipe, plane_id));
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
@@ -508,6 +538,7 @@ ivb_update_plane(struct drm_plane *plane,
 	uint32_t y = plane_state->base.src.y1 >> 16;
 	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	unsigned long irqflags;
 
 	sprctl = SPRITE_ENABLE;
 
@@ -554,6 +585,11 @@ ivb_update_plane(struct drm_plane *plane,
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
+		sprctl |= SPRITE_DEST_KEY;
+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
+		sprctl |= SPRITE_SOURCE_KEY;
+
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -575,36 +611,35 @@ ivb_update_plane(struct drm_plane *plane,
 
 	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
 
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
 	if (key->flags) {
-		I915_WRITE(SPRKEYVAL(pipe), key->min_value);
-		I915_WRITE(SPRKEYMAX(pipe), key->max_value);
-		I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
+		I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
+		I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+		I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
 	}
 
-	if (key->flags & I915_SET_COLORKEY_DESTINATION)
-		sprctl |= SPRITE_DEST_KEY;
-	else if (key->flags & I915_SET_COLORKEY_SOURCE)
-		sprctl |= SPRITE_SOURCE_KEY;
-
-	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
-	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+	I915_WRITE_FW(SPRSTRIDE(pipe), fb->pitches[0]);
+	I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
 
 	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
 	 * register */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+		I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
 	else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+		I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
 	else
-		I915_WRITE(SPRLINOFF(pipe), linear_offset);
+		I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
 
-	I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+	I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
 	if (intel_plane->can_scale)
-		I915_WRITE(SPRSCALE(pipe), sprscale);
-	I915_WRITE(SPRCTL(pipe), sprctl);
-	I915_WRITE(SPRSURF(pipe),
-		   intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
-	POSTING_READ(SPRSURF(pipe));
+		I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+	I915_WRITE_FW(SPRCTL(pipe), sprctl);
+	I915_WRITE_FW(SPRSURF(pipe),
+		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+	POSTING_READ_FW(SPRSURF(pipe));
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
@@ -614,14 +649,19 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	int pipe = intel_plane->pipe;
+	unsigned long irqflags;
 
-	I915_WRITE(SPRCTL(pipe), 0);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	I915_WRITE_FW(SPRCTL(pipe), 0);
 	/* Can't leave the scaler enabled... */
 	if (intel_plane->can_scale)
-		I915_WRITE(SPRSCALE(pipe), 0);
+		I915_WRITE_FW(SPRSCALE(pipe), 0);
+
+	I915_WRITE_FW(SPRSURF(pipe), 0);
+	POSTING_READ_FW(SPRSURF(pipe));
 
-	I915_WRITE(SPRSURF(pipe), 0);
-	POSTING_READ(SPRSURF(pipe));
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
@@ -646,6 +686,7 @@ ilk_update_plane(struct drm_plane *plane,
 	uint32_t y = plane_state->base.src.y1 >> 16;
 	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+	unsigned long irqflags;
 
 	dvscntr = DVS_ENABLE;
 
@@ -687,6 +728,11 @@ ilk_update_plane(struct drm_plane *plane,
 	if (IS_GEN6(dev_priv))
 		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
 
+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
+		dvscntr |= DVS_DEST_KEY;
+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
+		dvscntr |= DVS_SOURCE_KEY;
+
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -707,31 +753,30 @@ ilk_update_plane(struct drm_plane *plane,
 
 	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
 
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
 	if (key->flags) {
-		I915_WRITE(DVSKEYVAL(pipe), key->min_value);
-		I915_WRITE(DVSKEYMAX(pipe), key->max_value);
-		I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
+		I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
+		I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+		I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
 	}
 
-	if (key->flags & I915_SET_COLORKEY_DESTINATION)
-		dvscntr |= DVS_DEST_KEY;
-	else if (key->flags & I915_SET_COLORKEY_SOURCE)
-		dvscntr |= DVS_SOURCE_KEY;
-
-	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
-	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+	I915_WRITE_FW(DVSSTRIDE(pipe), fb->pitches[0]);
+	I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
 	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+		I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
 	else
-		I915_WRITE(DVSLINOFF(pipe), linear_offset);
-
-	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
-	I915_WRITE(DVSSCALE(pipe), dvsscale);
-	I915_WRITE(DVSCNTR(pipe), dvscntr);
-	I915_WRITE(DVSSURF(pipe),
-		   intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
-	POSTING_READ(DVSSURF(pipe));
+		I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+
+	I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+	I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+	I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
+	I915_WRITE_FW(DVSSURF(pipe),
+		      intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+	POSTING_READ_FW(DVSSURF(pipe));
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void
@@ -741,13 +786,18 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	int pipe = intel_plane->pipe;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE(DVSCNTR(pipe), 0);
+	I915_WRITE_FW(DVSCNTR(pipe), 0);
 	/* Disable the scaler */
-	I915_WRITE(DVSSCALE(pipe), 0);
+	I915_WRITE_FW(DVSSCALE(pipe), 0);
+
+	I915_WRITE_FW(DVSSURF(pipe), 0);
+	POSTING_READ_FW(DVSSURF(pipe));
 
-	I915_WRITE(DVSSURF(pipe), 0);
-	POSTING_READ(DVSSURF(pipe));
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index c46bc8594f22..d15a7d9d4eb0 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -24,12 +24,167 @@
 
 #include "i915_drv.h"
 #include "intel_uc.h"
+#include <linux/firmware.h>
+
+/* Reset GuC, providing us with fresh state for both GuC and HuC. */
+static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	u32 guc_status;
+
+	ret = intel_guc_reset(dev_priv);
+	if (ret) {
+		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+		return ret;
+	}
+
+	guc_status = I915_READ(GUC_STATUS);
+	WARN(!(guc_status & GS_MIA_IN_RESET),
+	     "GuC status: 0x%x, MIA core expected to be in reset\n",
+	     guc_status);
+
+	return ret;
+}
+
+void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+{
+	if (!HAS_GUC(dev_priv)) {
+		if (i915.enable_guc_loading > 0 ||
+		    i915.enable_guc_submission > 0)
+			DRM_INFO("Ignoring GuC options, no hardware\n");
+
+		i915.enable_guc_loading = 0;
+		i915.enable_guc_submission = 0;
+		return;
+	}
+
+	/* A negative value means "use platform default" */
+	if (i915.enable_guc_loading < 0)
+		i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+
+	/* Verify firmware version */
+	if (i915.enable_guc_loading) {
+		if (HAS_HUC_UCODE(dev_priv))
+			intel_huc_select_fw(&dev_priv->huc);
+
+		if (intel_guc_select_fw(&dev_priv->guc))
+			i915.enable_guc_loading = 0;
+	}
+
+	/* Can't enable guc submission without guc loaded */
+	if (!i915.enable_guc_loading)
+		i915.enable_guc_submission = 0;
+
+	/* A negative value means "use platform default" */
+	if (i915.enable_guc_submission < 0)
+		i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+}
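
Both parameters follow the same tri-state convention: a negative value selects the platform default, 0 disables, 1 enables, and values above 1 additionally turn a load failure into a fatal -EIO in intel_uc_init_hw() below rather than a silent fallback to execlists.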
 
 void intel_uc_init_early(struct drm_i915_private *dev_priv)
 {
 	mutex_init(&dev_priv->guc.send_mutex);
 }
 
+void intel_uc_init_fw(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->huc.fw.path)
+		intel_uc_prepare_fw(dev_priv, &dev_priv->huc.fw);
+
+	if (dev_priv->guc.fw.path)
+		intel_uc_prepare_fw(dev_priv, &dev_priv->guc.fw);
+}
+
+int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+{
+	int ret, attempts;
+
+	/* GuC not enabled, nothing to do */
+	if (!i915.enable_guc_loading)
+		return 0;
+
+	gen9_reset_guc_interrupts(dev_priv);
+
+	/* We need to notify the guc whenever we change the GGTT */
+	i915_ggtt_enable_guc(dev_priv);
+
+	if (i915.enable_guc_submission) {
+		ret = i915_guc_submission_init(dev_priv);
+		if (ret)
+			goto err;
+	}
+
+	/* WaEnableuKernelHeaderValidFix:skl */
+	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
+	if (IS_GEN9(dev_priv))
+		attempts = 3;
+	else
+		attempts = 1;
+
+	while (attempts--) {
+		/*
+		 * Always reset the GuC just before (re)loading, so
+		 * that the state and timing are fairly predictable
+		 */
+		ret = __intel_uc_reset_hw(dev_priv);
+		if (ret)
+			goto err_submission;
+
+		intel_huc_init_hw(&dev_priv->huc);
+		ret = intel_guc_init_hw(&dev_priv->guc);
+		if (ret == 0 || ret != -EAGAIN)
+			break;
+
+		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
+				 "retry %d more time(s)\n", ret, attempts);
+	}
+
+	/* Did we succeed or run out of retries? */
+	if (ret)
+		goto err_submission;
+
+	intel_guc_auth_huc(dev_priv);
+	if (i915.enable_guc_submission) {
+		if (i915.guc_log_level >= 0)
+			gen9_enable_guc_interrupts(dev_priv);
+
+		ret = i915_guc_submission_enable(dev_priv);
+		if (ret)
+			goto err_submission;
+	}
+
+	return 0;
+
+	/*
+	 * We've failed to load the firmware :(
+	 *
+	 * Decide whether to disable GuC submission and fall back to
+	 * execlist mode, and whether to hide the error by returning
+	 * zero or to return -EIO, which the caller will treat as a
+	 * nonfatal error (i.e. it doesn't prevent driver load, but
+	 * marks the GPU as wedged until reset).
+	 */
+err_submission:
+	if (i915.enable_guc_submission)
+		i915_guc_submission_fini(dev_priv);
+
+err:
+	i915_ggtt_disable_guc(dev_priv);
+
+	DRM_ERROR("GuC init failed\n");
+	if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
+		ret = -EIO;
+	else
+		ret = 0;
+
+	if (i915.enable_guc_submission) {
+		i915.enable_guc_submission = 0;
+		DRM_NOTE("Falling back from GuC submission to execlist mode\n");
+	}
+
+	return ret;
+}
+
 /*
  * Read GuC command/status register (SOFT_SCRATCH_0)
  * Return true if it contains a response rather than a command
@@ -114,3 +269,135 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
+void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
+			 struct intel_uc_fw *uc_fw)
+{
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct drm_i915_gem_object *obj;
+	const struct firmware *fw = NULL;
+	struct uc_css_header *css;
+	size_t size;
+	int err;
+
+	uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+	DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+			 intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+	err = request_firmware(&fw, uc_fw->path, &pdev->dev);
+	if (err)
+		goto fail;
+	if (!fw)
+		goto fail;
+
+	DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+		uc_fw->path, fw);
+
+	/* Check the size of the blob before examining buffer contents */
+	if (fw->size < sizeof(struct uc_css_header)) {
+		DRM_NOTE("Firmware header is missing\n");
+		goto fail;
+	}
+
+	css = (struct uc_css_header *)fw->data;
+
+	/* Firmware bits always start from header */
+	uc_fw->header_offset = 0;
+	uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
+
+	if (uc_fw->header_size != sizeof(struct uc_css_header)) {
+		DRM_NOTE("CSS header definition mismatch\n");
+		goto fail;
+	}
+
+	/* then, uCode */
+	uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+	/* now RSA */
+	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+		DRM_NOTE("RSA key size is bad\n");
+		goto fail;
+	}
+	uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+	/* At a minimum the blob must contain the header, uCode and RSA key. */
+	size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
+	if (fw->size < size) {
+		DRM_NOTE("Missing firmware components\n");
+		goto fail;
+	}
+
+	/*
+	 * The GuC firmware image has the version number embedded at a
+	 * well-known offset within the firmware blob; note that major / minor
+	 * version are TWO bytes each (i.e. u16), although all pointers and
+	 * offsets are defined in terms of bytes (u8).
+	 */
+	switch (uc_fw->type) {
+	case INTEL_UC_FW_TYPE_GUC:
+		/* Header and uCode are loaded to WOPCM; check their combined size. */
+		size = uc_fw->header_size + uc_fw->ucode_size;
+
+		/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+		if (size > intel_guc_wopcm_size(dev_priv)) {
+			DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+			goto fail;
+		}
+		uc_fw->major_ver_found = css->guc.sw_version >> 16;
+		uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+		break;
+
+	case INTEL_UC_FW_TYPE_HUC:
+		uc_fw->major_ver_found = css->huc.sw_version >> 16;
+		uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+		break;
+
+	default:
+		DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
+		err = -ENOEXEC;
+		goto fail;
+	}
+
+	if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+		DRM_NOTE("Skipping uC firmware version check\n");
+	} else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+		   uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+		DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
+			uc_fw->major_ver_found, uc_fw->minor_ver_found,
+			uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+		err = -ENOEXEC;
+		goto fail;
+	}
+
+	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
+			uc_fw->major_ver_found, uc_fw->minor_ver_found,
+			uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+	obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto fail;
+	}
+
+	uc_fw->obj = obj;
+	uc_fw->size = fw->size;
+
+	DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+			uc_fw->obj);
+
+	release_firmware(fw);
+	uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+	return;
+
+fail:
+	DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+		 uc_fw->path, err);
+	DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+		err, fw, uc_fw->obj);
+
+	release_firmware(fw);		/* OK even if fw is NULL */
+	uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+}
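
Putting the offset bookkeeping above together, the blob is consumed strictly in file order; with all *_dw fields counting u32s, the layout works out to (a summary of the computations above, not new layout information):

	header: [0, header_size)
	        header_size = (header_size_dw - modulus_size_dw
	                       - key_size_dw - exponent_size_dw) * 4
	uCode:  [ucode_offset, ucode_offset + ucode_size)
	        ucode_size = (size_dw - header_size_dw) * 4
	RSA:    [rsa_offset, rsa_offset + key_size_dw * 4)

and the final size check only insists the file is at least header + uCode + RSA, so trailing data is tolerated.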
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index d74f4d3ad8dc..a35ededfaa40 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -121,7 +121,7 @@ struct intel_uc_fw {
 	uint16_t major_ver_found;
 	uint16_t minor_ver_found;
 
-	enum intel_uc_fw_type fw;
+	enum intel_uc_fw_type type;
 	uint32_t header_size;
 	uint32_t header_offset;
 	uint32_t rsa_size;
@@ -184,19 +184,22 @@ struct intel_huc {
 };
 
 /* intel_uc.c */
+void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
+void intel_uc_init_fw(struct drm_i915_private *dev_priv);
+int intel_uc_init_hw(struct drm_i915_private *dev_priv);
+void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
+			 struct intel_uc_fw *uc_fw);
 int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
 
 /* intel_guc_loader.c */
-extern void intel_guc_init(struct drm_i915_private *dev_priv);
-extern int intel_guc_setup(struct drm_i915_private *dev_priv);
-extern void intel_guc_fini(struct drm_i915_private *dev_priv);
-extern const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
-extern int intel_guc_suspend(struct drm_i915_private *dev_priv);
-extern int intel_guc_resume(struct drm_i915_private *dev_priv);
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
-	struct intel_uc_fw *uc_fw);
+int intel_guc_select_fw(struct intel_guc *guc);
+int intel_guc_init_hw(struct intel_guc *guc);
+void intel_guc_fini(struct drm_i915_private *dev_priv);
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
+int intel_guc_suspend(struct drm_i915_private *dev_priv);
+int intel_guc_resume(struct drm_i915_private *dev_priv);
 u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
 
 /* i915_guc_submission.c */
@@ -223,9 +226,9 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
 }
 
 /* intel_huc.c */
-void intel_huc_init(struct drm_i915_private *dev_priv);
+void intel_huc_select_fw(struct intel_huc *huc);
 void intel_huc_fini(struct drm_i915_private  *dev_priv);
-int intel_huc_load(struct drm_i915_private *dev_priv);
+int intel_huc_init_hw(struct intel_huc *huc);
 void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
 
 #endif
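
Taken together, the renamed entry points spell out a phased uC bring-up: early state setup, option sanitizing, firmware selection/fetch, then hardware init. One plausible load sequence implied by the names (a sketch, not verbatim driver code):

	intel_uc_init_early(dev_priv);		/* early software state */
	intel_uc_sanitize_options(dev_priv);	/* validate guc/huc module params */
	intel_uc_init_fw(dev_priv);		/* fetch GuC/HuC firmware images */
	...
	intel_uc_init_hw(dev_priv);		/* load the firmware onto the GPU */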
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index b35b7a03eeaf..09f5f02d7901 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -25,6 +25,7 @@
 #include "intel_drv.h"
 #include "i915_vgpu.h"
 
+#include <asm/iosf_mbi.h>
 #include <linux/pm_runtime.h>
 
 #define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -119,6 +120,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 
 	for_each_fw_domain_masked(d, fw_domains, dev_priv)
 		fw_domain_wait_ack(d);
+
+	dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static void
@@ -130,13 +133,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 		fw_domain_put(d);
 		fw_domain_posting_read(d);
 	}
-}
 
-static void
-vgpu_fw_domains_nop(struct drm_i915_private *dev_priv,
-		    enum forcewake_domains fw_domains)
-{
-	/* Guest driver doesn't need to takes care forcewake. */
+	dev_priv->uncore.fw_domains_active &= ~fw_domains;
 }
 
 static void
@@ -247,18 +245,16 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 	if (WARN_ON(domain->wake_count == 0))
 		domain->wake_count++;
 
-	if (--domain->wake_count == 0) {
+	if (--domain->wake_count == 0)
 		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
-		dev_priv->uncore.fw_domains_active &= ~domain->mask;
-	}
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
 	return HRTIMER_NORESTART;
 }
 
-void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
-				  bool restore)
+static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+					 bool restore)
 {
 	unsigned long irqflags;
 	struct intel_uncore_forcewake_domain *domain;
@@ -434,10 +430,18 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
 	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
 }
 
-void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
-				 bool restore_forcewake)
+void intel_uncore_suspend(struct drm_i915_private *dev_priv)
+{
+	iosf_mbi_unregister_pmic_bus_access_notifier(
+		&dev_priv->uncore.pmic_bus_access_nb);
+	intel_uncore_forcewake_reset(dev_priv, false);
+}
+
+void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
 {
-	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+	__intel_uncore_early_sanitize(dev_priv, true);
+	iosf_mbi_register_pmic_bus_access_notifier(
+		&dev_priv->uncore.pmic_bus_access_nb);
 	i915_check_and_clear_faults(dev_priv);
 }
 
@@ -461,10 +465,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 			fw_domains &= ~domain->mask;
 	}
 
-	if (fw_domains) {
+	if (fw_domains)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-		dev_priv->uncore.fw_domains_active |= fw_domains;
-	}
 }
 
 /**
@@ -931,7 +933,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
 		fw_domain_arm_timer(domain);
 
 	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-	dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
@@ -1179,7 +1180,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 
 static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev_priv)->gen <= 5)
+	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
 		return;
 
 	if (IS_GEN9(dev_priv)) {
@@ -1265,11 +1266,6 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 			       FORCEWAKE, FORCEWAKE_ACK);
 	}
 
-	if (intel_vgpu_active(dev_priv)) {
-		dev_priv->uncore.funcs.force_wake_get = vgpu_fw_domains_nop;
-		dev_priv->uncore.funcs.force_wake_put = vgpu_fw_domains_nop;
-	}
-
 	/* All future platforms are expected to require complex power gating */
 	WARN_ON(dev_priv->uncore.fw_domains == 0);
 }
@@ -1281,6 +1277,32 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
 }
 
+static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
+					 unsigned long action, void *data)
+{
+	struct drm_i915_private *dev_priv = container_of(nb,
+			struct drm_i915_private, uncore.pmic_bus_access_nb);
+
+	switch (action) {
+	case MBI_PMIC_BUS_ACCESS_BEGIN:
+		/*
+		 * Grab forcewake on all domains now, so that we don't need
+		 * to take it later: on the systems where this notifier gets
+		 * called, a later forcewake would require the punit to access
+		 * the shared pmic i2c bus, which will be busy after this
+		 * notification, leading to:
+		 * "render: timed out waiting for forcewake ack request."
+		 * errors.
+		 */
+		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+		break;
+	case MBI_PMIC_BUS_ACCESS_END:
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 void intel_uncore_init(struct drm_i915_private *dev_priv)
 {
 	i915_check_vgpu(dev_priv);
@@ -1290,23 +1312,25 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 	__intel_uncore_early_sanitize(dev_priv, false);
 
 	dev_priv->uncore.unclaimed_mmio_check = 1;
+	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
+		i915_pmic_bus_access_notifier;
 
-	switch (INTEL_INFO(dev_priv)->gen) {
-	default:
-	case 9:
-		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
-		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
-		ASSIGN_READ_MMIO_VFUNCS(fwtable);
-		if (HAS_DECOUPLED_MMIO(dev_priv)) {
-			dev_priv->uncore.funcs.mmio_readl =
-						gen9_decoupled_read32;
-			dev_priv->uncore.funcs.mmio_readq =
-						gen9_decoupled_read64;
-			dev_priv->uncore.funcs.mmio_writel =
-						gen9_decoupled_write32;
+	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
+		ASSIGN_READ_MMIO_VFUNCS(gen2);
+	} else if (IS_GEN5(dev_priv)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
+		ASSIGN_READ_MMIO_VFUNCS(gen5);
+	} else if (IS_GEN(dev_priv, 6, 7)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(gen6);
+
+		if (IS_VALLEYVIEW(dev_priv)) {
+			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
+			ASSIGN_READ_MMIO_VFUNCS(fwtable);
+		} else {
+			ASSIGN_READ_MMIO_VFUNCS(gen6);
 		}
-		break;
-	case 8:
+	} else if (IS_GEN8(dev_priv)) {
 		if (IS_CHERRYVIEW(dev_priv)) {
 			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
 			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
@@ -1316,30 +1340,23 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
 			ASSIGN_READ_MMIO_VFUNCS(gen6);
 		}
-		break;
-	case 7:
-	case 6:
-		ASSIGN_WRITE_MMIO_VFUNCS(gen6);
-
-		if (IS_VALLEYVIEW(dev_priv)) {
-			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
-			ASSIGN_READ_MMIO_VFUNCS(fwtable);
-		} else {
-			ASSIGN_READ_MMIO_VFUNCS(gen6);
+	} else {
+		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
+		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+		ASSIGN_READ_MMIO_VFUNCS(fwtable);
+		if (HAS_DECOUPLED_MMIO(dev_priv)) {
+			dev_priv->uncore.funcs.mmio_readl =
+						gen9_decoupled_read32;
+			dev_priv->uncore.funcs.mmio_readq =
+						gen9_decoupled_read64;
+			dev_priv->uncore.funcs.mmio_writel =
+						gen9_decoupled_write32;
 		}
-		break;
-	case 5:
-		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
-		ASSIGN_READ_MMIO_VFUNCS(gen5);
-		break;
-	case 4:
-	case 3:
-	case 2:
-		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
-		ASSIGN_READ_MMIO_VFUNCS(gen2);
-		break;
 	}
 
+	iosf_mbi_register_pmic_bus_access_notifier(
+		&dev_priv->uncore.pmic_bus_access_nb);
+
 	i915_check_and_clear_faults(dev_priv);
 }
 #undef ASSIGN_WRITE_MMIO_VFUNCS
@@ -1347,6 +1364,9 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 
 void intel_uncore_fini(struct drm_i915_private *dev_priv)
 {
+	iosf_mbi_unregister_pmic_bus_access_notifier(
+		&dev_priv->uncore.pmic_bus_access_nb);
+
 	/* Paranoia: make sure we have disabled everything before we exit. */
 	intel_uncore_sanitize(dev_priv);
 	intel_uncore_forcewake_reset(dev_priv, false);
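
The plumbing above follows the standard notifier_block pattern: register a callback once, react to MBI_PMIC_BUS_ACCESS_BEGIN/END events, and unregister on suspend/teardown so forcewake is never taken while the device is down. A minimal consumer, using the real iosf_mbi entry points with an illustrative callback:

static int example_pmic_bus_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/* the shared pmic i2c bus is about to become busy */
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		/* the bus is free again */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_pmic_bus_notifier,
};

/* at init: */	iosf_mbi_register_pmic_bus_access_notifier(&example_nb);
/* at exit: */	iosf_mbi_unregister_pmic_bus_access_notifier(&example_nb);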
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 3813a19a6179..1afb8b06e3e1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -320,8 +320,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct drm_file *file = mock_file(i915);
 	struct drm_i915_gem_object *obj;
+	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	LIST_HEAD(objects);
 	unsigned long ncontexts, ndwords, dw;
@@ -333,6 +333,10 @@ static int igt_ctx_exec(void *arg)
 	 * up in the expected pages of our obj.
 	 */
 
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
 	mutex_lock(&i915->drm.struct_mutex);
 
 	ncontexts = 0;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 97af353db218..14e9c2fbc4e6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -202,6 +202,95 @@ cleanup:
 	return err;
 }
 
+static void mock_color_adjust(const struct drm_mm_node *node,
+			      unsigned long color,
+			      u64 *start,
+			      u64 *end)
+{
+}
+
+static int igt_evict_for_cache_color(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	const unsigned long flags = PIN_OFFSET_FIXED;
+	struct drm_mm_node target = {
+		.start = I915_GTT_PAGE_SIZE * 2,
+		.size = I915_GTT_PAGE_SIZE,
+		.color = I915_CACHE_LLC,
+	};
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err;
+
+	/* Currently the use of color_adjust is limited to cache domains within
+	 * the ggtt, so throughout the driver a non-NULL mm.color_adjust is
+	 * assumed to be i915_gtt_color_adjust. A mock color adjust therefore
+	 * works just fine for our purposes here.
+	 */
+	ggtt->base.mm.color_adjust = mock_color_adjust;
+
+	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto cleanup;
+	}
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+				       I915_GTT_PAGE_SIZE | flags);
+	if (IS_ERR(vma)) {
+		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
+		err = PTR_ERR(vma);
+		goto cleanup;
+	}
+
+	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto cleanup;
+	}
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+	/* Neighbouring; same colour - should fit */
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+				       (I915_GTT_PAGE_SIZE * 2) | flags);
+	if (IS_ERR(vma)) {
+		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
+		err = PTR_ERR(vma);
+		goto cleanup;
+	}
+
+	i915_vma_unpin(vma);
+
+	/* Remove just the second vma */
+	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+	if (err) {
+		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
+		goto cleanup;
+	}
+
+	/* Attempt to remove the first *pinned* vma by evicting for its (now
+	 * empty) neighbour of a different colour -- this should fail.
+	 */
+	target.color = I915_CACHE_L3_LLC;
+
+	err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+	if (!err) {
+		pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
+		err = -EINVAL;
+		goto cleanup;
+	}
+
+	err = 0;
+
+cleanup:
+	unpin_ggtt(i915);
+	cleanup_objects(i915);
+	ggtt->base.mm.color_adjust = NULL;
+	return err;
+}
+
 static int igt_evict_vm(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -241,6 +330,7 @@ int i915_gem_evict_mock_selftests(void)
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_evict_something),
 		SUBTEST(igt_evict_for_vma),
+		SUBTEST(igt_evict_for_cache_color),
 		SUBTEST(igt_evict_vm),
 		SUBTEST(igt_overcommit),
 	};
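
The mock above deliberately does nothing: the eviction code only needs *a* color_adjust callback to be installed before it starts honouring node colours. For comparison, a real callback receives a neighbouring node plus the colour being allocated and may shrink the usable [*start, *end) range; an illustrative (not verbatim) guard-page variant:

static void example_color_adjust(const struct drm_mm_node *node,
				 unsigned long color,
				 u64 *start, u64 *end)
{
	/* keep a guard page between nodes of different cache colours */
	if (node->allocated && node->color != color)
		*start += I915_GTT_PAGE_SIZE;
}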
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 0f3fa34377c6..50710e3f1caa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -103,7 +103,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 
 	obj = i915_gem_object_alloc(i915);
 	if (!obj)
-		return ERR_PTR(-ENOMEM);
+		goto err;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &fake_ops);
@@ -114,10 +114,15 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 
 	/* Preallocate the "backing storage" */
 	if (i915_gem_object_pin_pages(obj))
-		return ERR_PTR(-ENOMEM);
+		goto err_obj;
 
 	i915_gem_object_unpin_pages(obj);
 	return obj;
+
+err_obj:
+	i915_gem_object_put(obj);
+err:
+	return ERR_PTR(-ENOMEM);
 }
 
 static int igt_ppgtt_alloc(void *arg)
@@ -534,7 +539,7 @@ static int walk_hole(struct drm_i915_private *i915,
 		vma = i915_vma_instance(obj, vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			goto err;
+			goto err_put;
 		}
 
 		for (addr = hole_start;
@@ -545,7 +550,7 @@ static int walk_hole(struct drm_i915_private *i915,
 				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
 				       __func__, addr, vma->size,
 				       hole_start, hole_end, err);
-				goto err;
+				goto err_close;
 			}
 			i915_vma_unpin(vma);
 
@@ -554,14 +559,14 @@ static int walk_hole(struct drm_i915_private *i915,
 				pr_err("%s incorrect at %llx + %llx\n",
 				       __func__, addr, vma->size);
 				err = -EINVAL;
-				goto err;
+				goto err_close;
 			}
 
 			err = i915_vma_unbind(vma);
 			if (err) {
 				pr_err("%s unbind failed at %llx + %llx  with err=%d\n",
 				       __func__, addr, vma->size, err);
-				goto err;
+				goto err_close;
 			}
 
 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -570,13 +575,14 @@ static int walk_hole(struct drm_i915_private *i915,
 					"%s timed out at %llx\n",
 					__func__, addr)) {
 				err = -EINTR;
-				goto err;
+				goto err_close;
 			}
 		}
 
-err:
+err_close:
 		if (!i915_vma_is_ggtt(vma))
 			i915_vma_close(vma);
+err_put:
 		i915_gem_object_put(obj);
 		if (err)
 			return err;
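
Both fixes in this file are the kernel's standard unwind-label idiom: each failure jumps to the label that releases exactly what has been acquired so far, so later acquisitions unwind earlier ones. In miniature (illustrative names, not driver functions):

	obj = alloc_object();
	if (!obj)
		goto err;		/* nothing to undo yet */

	if (pin_pages(obj))
		goto err_obj;		/* must drop the object reference */

	return obj;

err_obj:
	put_object(obj);
err:
	return ERR_PTR(-ENOMEM);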
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 6ba3abb10c6f..addc5a599c4a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -63,13 +63,13 @@ struct selftest {
 	};
 };
 
-#define selftest(n, f) [mock_##n] = { .name = #n, .mock = f },
+#define selftest(n, f) [mock_##n] = { .name = #n, { .mock = f } },
 static struct selftest mock_selftests[] = {
 #include "i915_mock_selftests.h"
 };
 #undef selftest
 
-#define selftest(n, f) [live_##n] = { .name = #n, .live = f },
+#define selftest(n, f) [live_##n] = { .name = #n, { .live = f } },
 static struct selftest live_selftests[] = {
 #include "i915_live_selftests.h"
 };
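
The extra braces give the anonymous union its own initializer level: { .name = #n, { .mock = f } } first initializes .name, then the union (the second member). Designating a union member directly from the outer level, as before, is valid for newer compilers but is known to trip up some older gcc releases, which is presumably the motivation here. Both spellings, for contrast (illustrative):

struct st {
	const char *name;
	union {
		int (*mock)(void);
		int (*live)(void *);
	};
};

static struct st ok   = { .name = "x", { .mock = NULL } };	/* portable */
static struct st also = { .name = "x", .mock = NULL };		/* newer compilers */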
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index d4acee6730e9..6ec7c731a267 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -301,7 +301,8 @@ static int igt_global_reset(void *arg)
 
 	/* Check that we can issue a global GPU reset */
 
-	set_bit(I915_RESET_IN_PROGRESS, &i915->gpu_error.flags);
+	set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+	set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	reset_count = i915_reset_count(&i915->gpu_error);
@@ -314,7 +315,8 @@ static int igt_global_reset(void *arg)
 	}
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	GEM_BUG_ON(test_bit(I915_RESET_IN_PROGRESS, &i915->gpu_error.flags));
+	GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
+	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
 	if (i915_terminally_wedged(&i915->gpu_error))
 		err = -EIO;
 
@@ -330,7 +332,7 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
 
 	reset_count = i915_reset_count(&rq->i915->gpu_error);
 
-	set_bit(I915_RESET_IN_PROGRESS, &rq->i915->gpu_error.flags);
+	set_bit(I915_RESET_HANDOFF, &rq->i915->gpu_error.flags);
 	wake_up_all(&rq->i915->gpu_error.wait_queue);
 
 	return reset_count;
@@ -357,7 +359,7 @@ static int igt_wait_reset(void *arg)
 
 	/* Check that we detect a stuck waiter and issue a reset */
 
-	set_bit(I915_RESET_IN_PROGRESS, &i915->gpu_error.flags);
+	set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -388,8 +390,8 @@ static int igt_wait_reset(void *arg)
 		err = timeout;
 		goto out_rq;
 	}
-	GEM_BUG_ON(test_bit(I915_RESET_IN_PROGRESS, &i915->gpu_error.flags));
 
+	GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
 	if (i915_reset_count(&i915->gpu_error) == reset_count) {
 		pr_err("No GPU reset recorded!\n");
 		err = -EINVAL;
@@ -402,6 +404,7 @@ fini:
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
+	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
@@ -422,6 +425,7 @@ static int igt_reset_queue(void *arg)
 	if (!igt_can_mi_store_dword_imm(i915))
 		return 0;
 
+	set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
 	if (err)
@@ -470,8 +474,9 @@ static int igt_reset_queue(void *arg)
 
 			i915_reset(i915);
 
-			GEM_BUG_ON(test_bit(I915_RESET_IN_PROGRESS,
+			GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
 					    &i915->gpu_error.flags));
+
 			if (prev->fence.error != -EIO) {
 				pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
 				       prev->fence.error);
@@ -514,6 +519,7 @@ fini:
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
+	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
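
These selftest changes track the split of the old I915_RESET_IN_PROGRESS bit into two: I915_RESET_BACKOFF (a reset is pending, back off from submitting new work) and I915_RESET_HANDOFF (a waiter has been handed the job of actually performing the reset). The sequence the tests drive, condensed into one sketch (with i915_reset() expected to clear HANDOFF, as the GEM_BUG_ONs above assert):

	set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);	/* stop new work */
	set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);	/* request the reset */

	i915_reset(i915);					/* clears HANDOFF */

	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);	/* resume normal use */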
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index 1590ad0a8081..1749a0f5a9fa 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -16,6 +16,7 @@
 #include <linux/acpi.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
+#include <linux/pm_qos.h>
 
 #include <asm/iosf_mbi.h>
 
@@ -23,19 +24,29 @@
 
 #define SEMAPHORE_TIMEOUT	100
 #define PUNIT_SEMAPHORE		0x7
+#define PUNIT_SEMAPHORE_CHT	0x10e
 #define PUNIT_SEMAPHORE_BIT	BIT(0)
 #define PUNIT_SEMAPHORE_ACQUIRE	BIT(1)
 
 static unsigned long acquired;
 
-static int get_sem(struct device *dev, u32 *sem)
+static u32 get_sem_addr(struct dw_i2c_dev *dev)
 {
+	if (dev->flags & MODEL_CHERRYTRAIL)
+		return PUNIT_SEMAPHORE_CHT;
+	else
+		return PUNIT_SEMAPHORE;
+}
+
+static int get_sem(struct dw_i2c_dev *dev, u32 *sem)
+{
+	u32 addr = get_sem_addr(dev);
 	u32 data;
 	int ret;
 
-	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &data);
+	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, addr, &data);
 	if (ret) {
-		dev_err(dev, "iosf failed to read punit semaphore\n");
+		dev_err(dev->dev, "iosf failed to read punit semaphore\n");
 		return ret;
 	}
 
@@ -44,22 +55,22 @@ static int get_sem(struct device *dev, u32 *sem)
 	return 0;
 }
 
-static void reset_semaphore(struct device *dev)
+static void reset_semaphore(struct dw_i2c_dev *dev)
 {
-	u32 data;
+	if (iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, get_sem_addr(dev),
+			    0, PUNIT_SEMAPHORE_BIT))
+		dev_err(dev->dev, "iosf failed to reset punit semaphore during write\n");
 
-	if (iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &data)) {
-		dev_err(dev, "iosf failed to reset punit semaphore during read\n");
-		return;
-	}
+	pm_qos_update_request(&dev->pm_qos, PM_QOS_DEFAULT_VALUE);
 
-	data &= ~PUNIT_SEMAPHORE_BIT;
-	if (iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, PUNIT_SEMAPHORE, data))
-		dev_err(dev, "iosf failed to reset punit semaphore during write\n");
+	iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_END,
+						     NULL);
+	iosf_mbi_punit_release();
 }
 
 static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
 {
+	u32 addr = get_sem_addr(dev);
 	u32 sem = PUNIT_SEMAPHORE_ACQUIRE;
 	int ret;
 	unsigned long start, end;
@@ -72,18 +83,29 @@ static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
 	if (!dev->release_lock)
 		return 0;
 
+	iosf_mbi_punit_acquire();
+	iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_BEGIN,
+						     NULL);
+
+	/*
+	 * Disallow the CPU from entering C6 or C7 states: entering these
+	 * states requires the punit to talk to the pmic, and if that happens
+	 * while we're holding the semaphore, the SoC hangs.
+	 */
+	pm_qos_update_request(&dev->pm_qos, 0);
+
 	/* host driver writes to side band semaphore register */
-	ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, PUNIT_SEMAPHORE, sem);
+	ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, addr, sem);
 	if (ret) {
 		dev_err(dev->dev, "iosf punit semaphore request failed\n");
-		return ret;
+		goto out;
 	}
 
 	/* host driver waits for bit 0 to be set in semaphore register */
 	start = jiffies;
 	end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
 	do {
-		ret = get_sem(dev->dev, &sem);
+		ret = get_sem(dev, &sem);
 		if (!ret && sem) {
 			acquired = jiffies;
 			dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
@@ -95,9 +117,10 @@ static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
 	} while (time_before(jiffies, end));
 
 	dev_err(dev->dev, "punit semaphore timed out, resetting\n");
-	reset_semaphore(dev->dev);
+out:
+	reset_semaphore(dev);
 
-	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &sem);
+	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, addr, &sem);
 	if (ret)
 		dev_err(dev->dev, "iosf failed to read punit semaphore\n");
 	else
@@ -116,12 +139,12 @@ static void baytrail_i2c_release(struct dw_i2c_dev *dev)
 	if (!dev->acquire_lock)
 		return;
 
-	reset_semaphore(dev->dev);
+	reset_semaphore(dev);
 	dev_dbg(dev->dev, "punit semaphore held for %ums\n",
 		jiffies_to_msecs(jiffies - acquired));
 }
 
-int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
+int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
 {
 	acpi_status status;
 	unsigned long long shared_host = 0;
@@ -138,15 +161,25 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
 	if (ACPI_FAILURE(status))
 		return 0;
 
-	if (shared_host) {
-		dev_info(dev->dev, "I2C bus managed by PUNIT\n");
-		dev->acquire_lock = baytrail_i2c_acquire;
-		dev->release_lock = baytrail_i2c_release;
-		dev->pm_runtime_disabled = true;
-	}
+	if (!shared_host)
+		return 0;
 
 	if (!iosf_mbi_available())
 		return -EPROBE_DEFER;
 
+	dev_info(dev->dev, "I2C bus managed by PUNIT\n");
+	dev->acquire_lock = baytrail_i2c_acquire;
+	dev->release_lock = baytrail_i2c_release;
+	dev->pm_runtime_disabled = true;
+
+	pm_qos_add_request(&dev->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+
 	return 0;
 }
+
+void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
+{
+	if (dev->acquire_lock)
+		pm_qos_remove_request(&dev->pm_qos);
+}
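
With the Cherry Trail support and the new punit coordination, acquiring the bus is a four-step dance that must be undone in reverse order on release or failure. Schematically, using the calls introduced above (error handling and the polling loop elided):

	/* acquire */
	iosf_mbi_punit_acquire();		/* serialize against other punit users */
	iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_BEGIN, NULL);
	pm_qos_update_request(&dev->pm_qos, 0);	/* keep the CPU out of C6/C7 */
	/* ... write PUNIT_SEMAPHORE_ACQUIRE, poll get_sem() until granted ... */

	/* release (reset_semaphore) */
	/* ... clear PUNIT_SEMAPHORE_BIT via iosf_mbi_modify() ... */
	pm_qos_update_request(&dev->pm_qos, PM_QOS_DEFAULT_VALUE);
	iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_END, NULL);
	iosf_mbi_punit_release();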
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 7a3faa551cf8..15a534818d4f 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -177,13 +177,13 @@ static u32 dw_readl(struct dw_i2c_dev *dev, int offset)
 {
 	u32 value;
 
-	if (dev->accessor_flags & ACCESS_16BIT)
+	if (dev->flags & ACCESS_16BIT)
 		value = readw_relaxed(dev->base + offset) |
 			(readw_relaxed(dev->base + offset + 2) << 16);
 	else
 		value = readl_relaxed(dev->base + offset);
 
-	if (dev->accessor_flags & ACCESS_SWAP)
+	if (dev->flags & ACCESS_SWAP)
 		return swab32(value);
 	else
 		return value;
@@ -191,10 +191,10 @@ static u32 dw_readl(struct dw_i2c_dev *dev, int offset)
 
 static void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
 {
-	if (dev->accessor_flags & ACCESS_SWAP)
+	if (dev->flags & ACCESS_SWAP)
 		b = swab32(b);
 
-	if (dev->accessor_flags & ACCESS_16BIT) {
+	if (dev->flags & ACCESS_16BIT) {
 		writew_relaxed((u16)b, dev->base + offset);
 		writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
 	} else {
@@ -339,10 +339,10 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 	reg = dw_readl(dev, DW_IC_COMP_TYPE);
 	if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
 		/* Configure register endianess access */
-		dev->accessor_flags |= ACCESS_SWAP;
+		dev->flags |= ACCESS_SWAP;
 	} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
 		/* Configure register access mode 16bit */
-		dev->accessor_flags |= ACCESS_16BIT;
+		dev->flags |= ACCESS_16BIT;
 	} else if (reg != DW_IC_COMP_TYPE_VALUE) {
 		dev_err(dev->dev, "Unknown Synopsys component type: "
 			"0x%08x\n", reg);
@@ -924,7 +924,7 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
 tx_aborted:
 	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
 		complete(&dev->cmd_complete);
-	else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
 		/* workaround to trigger pending interrupt */
 		stat = dw_readl(dev, DW_IC_INTR_MASK);
 		i2c_dw_disable_int(dev);
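
The component-type probe seen in context above is a self-detection trick: the controller exposes a fixed 32-bit signature (DW_IC_COMP_TYPE_VALUE), so reading it back byte-swapped implies swapped register endianness, and reading back only its low half implies a 16-bit register bus. Restated as a standalone helper (illustrative, not driver code):

static int dw_detect_access_mode(u32 reg, u32 *flags)
{
	if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE))
		*flags |= ACCESS_SWAP;	/* registers are byte-swapped */
	else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff))
		*flags |= ACCESS_16BIT;	/* only 16-bit accesses work */
	else if (reg != DW_IC_COMP_TYPE_VALUE)
		return -ENODEV;		/* not a DesignWare i2c block */

	return 0;
}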
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index c1db3a5a340f..091c24925c9c 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -23,6 +23,7 @@
  */
 
 #include <linux/i2c.h>
+#include <linux/pm_qos.h>
 
 #define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C |			\
 					I2C_FUNC_SMBUS_BYTE |		\
@@ -75,6 +76,7 @@
  * @fp_lcnt: fast plus LCNT value
  * @hs_hcnt: high speed HCNT value
  * @hs_lcnt: high speed LCNT value
+ * @pm_qos: pm_qos_request used while holding a hardware lock on the bus
  * @acquire_lock: function to acquire a hardware lock on the bus
  * @release_lock: function to release a hardware lock on the bus
  * @pm_runtime_disabled: true if pm runtime is disabled
@@ -103,7 +105,7 @@ struct dw_i2c_dev {
 	unsigned int		status;
 	u32			abort_source;
 	int			irq;
-	u32			accessor_flags;
+	u32			flags;
 	struct i2c_adapter	adapter;
 	u32			functionality;
 	u32			master_cfg;
@@ -122,6 +124,7 @@ struct dw_i2c_dev {
 	u16			fp_lcnt;
 	u16			hs_hcnt;
 	u16			hs_lcnt;
+	struct pm_qos_request	pm_qos;
 	int			(*acquire_lock)(struct dw_i2c_dev *dev);
 	void			(*release_lock)(struct dw_i2c_dev *dev);
 	bool			pm_runtime_disabled;
@@ -131,6 +134,8 @@ struct dw_i2c_dev {
 #define ACCESS_16BIT		0x00000002
 #define ACCESS_INTR_MASK	0x00000004
 
+#define MODEL_CHERRYTRAIL	0x00000100
+
 extern int i2c_dw_init(struct dw_i2c_dev *dev);
 extern void i2c_dw_disable(struct dw_i2c_dev *dev);
 extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
@@ -138,7 +143,9 @@ extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
 extern int i2c_dw_probe(struct dw_i2c_dev *dev);
 
 #if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
-extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
+extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev);
+extern void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev);
 #else
-static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
+static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; }
+static inline void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) {}
 #endif
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index d6423cfac588..ed485b69b449 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -45,6 +45,7 @@ enum dw_pci_ctl_id_t {
 	medfield,
 	merrifield,
 	baytrail,
+	cherrytrail,
 	haswell,
 };
 
@@ -63,6 +64,7 @@ struct dw_pci_controller {
 	u32 rx_fifo_depth;
 	u32 clk_khz;
 	u32 functionality;
+	u32 flags;
 	struct dw_scl_sda_cfg *scl_sda_cfg;
 	int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c);
 };
@@ -170,6 +172,15 @@ static struct dw_pci_controller dw_pci_controllers[] = {
 		.functionality = I2C_FUNC_10BIT_ADDR,
 		.scl_sda_cfg = &hsw_config,
 	},
+	[cherrytrail] = {
+		.bus_num = -1,
+		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 32,
+		.rx_fifo_depth = 32,
+		.functionality = I2C_FUNC_10BIT_ADDR,
+		.flags = MODEL_CHERRYTRAIL,
+		.scl_sda_cfg = &byt_config,
+	},
 };
 
 #ifdef CONFIG_PM
@@ -237,6 +248,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
 	dev->base = pcim_iomap_table(pdev)[0];
 	dev->dev = &pdev->dev;
 	dev->irq = pdev->irq;
+	dev->flags |= controller->flags;
 
 	if (controller->setup) {
 		r = controller->setup(pdev, controller);
@@ -317,13 +329,13 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
 	{ PCI_VDEVICE(INTEL, 0x9c61), haswell },
 	{ PCI_VDEVICE(INTEL, 0x9c62), haswell },
 	/* Braswell / Cherrytrail */
-	{ PCI_VDEVICE(INTEL, 0x22C1), baytrail },
-	{ PCI_VDEVICE(INTEL, 0x22C2), baytrail },
-	{ PCI_VDEVICE(INTEL, 0x22C3), baytrail },
-	{ PCI_VDEVICE(INTEL, 0x22C4), baytrail },
-	{ PCI_VDEVICE(INTEL, 0x22C5), baytrail },
-	{ PCI_VDEVICE(INTEL, 0x22C6), baytrail },
-	{ PCI_VDEVICE(INTEL, 0x22C7), baytrail },
+	{ PCI_VDEVICE(INTEL, 0x22C1), cherrytrail },
+	{ PCI_VDEVICE(INTEL, 0x22C2), cherrytrail },
+	{ PCI_VDEVICE(INTEL, 0x22C3), cherrytrail },
+	{ PCI_VDEVICE(INTEL, 0x22C4), cherrytrail },
+	{ PCI_VDEVICE(INTEL, 0x22C5), cherrytrail },
+	{ PCI_VDEVICE(INTEL, 0x22C6), cherrytrail },
+	{ PCI_VDEVICE(INTEL, 0x22C7), cherrytrail },
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 6ce431323125..df0ff7d82b49 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -112,7 +112,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
 
 	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
 	if (id && id->driver_data)
-		dev->accessor_flags |= (u32)id->driver_data;
+		dev->flags |= (u32)id->driver_data;
 
 	return 0;
 }
@@ -123,7 +123,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
 	{ "INT3432", 0 },
 	{ "INT3433", 0 },
 	{ "80860F41", 0 },
-	{ "808622C1", 0 },
+	{ "808622C1", MODEL_CHERRYTRAIL },
 	{ "AMD0010", ACCESS_INTR_MASK },
 	{ "AMDI0010", ACCESS_INTR_MASK },
 	{ "AMDI0510", 0 },
@@ -238,7 +238,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	r = i2c_dw_eval_lock_support(dev);
+	r = i2c_dw_probe_lock_support(dev);
 	if (r)
 		return r;
 
@@ -307,6 +307,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
 	if (!dev->pm_runtime_disabled)
 		pm_runtime_disable(&pdev->dev);
 
+	i2c_dw_remove_lock_support(dev);
+
 	return 0;
 }
 
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 466c71592a6f..27e0dbaa6c0e 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -47,6 +47,14 @@
 	0x030000, 0xff0000,			\
 	(unsigned long) info }
 
+#define INTEL_I810_IDS(info)					\
+	INTEL_VGA_DEVICE(0x7121, info), /* I810 */		\
+	INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */	\
+	INTEL_VGA_DEVICE(0x7125, info)  /* I810_E */
+
+#define INTEL_I815_IDS(info)					\
+	INTEL_VGA_DEVICE(0x1132, info)  /* I815 */
+
 #define INTEL_I830_IDS(info)				\
 	INTEL_VGA_DEVICE(0x3577, info)
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 12e38c213b70..508cbf31d43e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3856,11 +3856,13 @@ void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
 {
 	current->lockdep_reclaim_gfp = gfp_mask;
 }
+EXPORT_SYMBOL_GPL(lockdep_set_current_reclaim_state);
 
 void lockdep_clear_current_reclaim_state(void)
 {
 	current->lockdep_reclaim_gfp = 0;
 }
+EXPORT_SYMBOL_GPL(lockdep_clear_current_reclaim_state);
 
 #ifdef CONFIG_LOCK_STAT
 static int