author     Dave Airlie <airlied@redhat.com>  2018-12-13 07:21:29 +1000
committer  Dave Airlie <airlied@redhat.com>  2018-12-13 07:21:29 +1000
commit     71fb553974556e29ebf6fa3cf6bd73342e7dbc5d
tree       f97562db373672f2058e4c53bd181727248932e3 /drivers/gpu
parent     40e020c129cfc991e8ab4736d2665351ffd1468d
parent     d76b21ebf8ff0ec5030e98c64c50dca2254474f3

Merge tag 'drm-intel-fixes-2018-12-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix for system crash after GPU hang (Bugzilla #107945)
- GVT fix for guest graphics corruption (https://github.com/intel/gvt-linux/issues/61)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181207104352.GA18214@jlahtine-desk.ger.corp.intel.com

Diffstat (limited to 'drivers/gpu')

-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c    |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c   |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c         |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 591
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.h |  26
9 files changed, 430 insertions(+), 204 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 481896fb712a..85e6736f0a32 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 		plane->bpp = skl_pixel_formats[fmt].bpp;
 		plane->drm_format = skl_pixel_formats[fmt].drm_format;
 	} else {
-		plane->tiled = !!(val & DISPPLANE_TILED);
+		plane->tiled = val & DISPPLANE_TILED;
 		fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
 		plane->bpp = bdw_pixel_formats[fmt].bpp;
 		plane->drm_format = bdw_pixel_formats[fmt].drm_format;
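
The one-line change above is the GVT fix called out in the merge message: plane->tiled now keeps the raw DISPPLANE_TILED register bit instead of collapsing it to 0/1 with the double negation. A minimal standalone sketch of why the two expressions differ; the bit position is defined locally here purely as a stand-in for the real register layout:

    #include <stdio.h>

    #define DEMO_DISPPLANE_TILED (1u << 10)  /* stand-in bit, defined only for this demo */

    int main(void)
    {
            unsigned int val = DEMO_DISPPLANE_TILED;               /* plane control word, tiling bit set */
            unsigned int as_bool = !!(val & DEMO_DISPPLANE_TILED); /* old expression: collapses to 0/1   */
            unsigned int as_bits = val & DEMO_DISPPLANE_TILED;     /* new expression: keeps the raw bit  */

            printf("as_bool=%u as_bits=%#x\n", as_bool, as_bits);  /* prints: as_bool=1 as_bits=0x400 */
            return 0;
    }
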
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ffdbbac4400e..47062ee979cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	intel_uncore_sanitize(dev_priv);
 
+	intel_gt_init_workarounds(dev_priv);
 	i915_gem_load_init_fences(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9102571e9692..1331cde79c2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -67,6 +67,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
+#include "intel_workarounds.h"
 #include "intel_uc.h"
 
 #include "i915_gem.h"
@@ -1805,6 +1806,7 @@ struct drm_i915_private {
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	struct i915_workarounds workarounds;
+	struct i915_wa_list gt_wa_list;
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..ba371712c560 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	intel_gt_workarounds_apply(dev_priv);
+	intel_gt_apply_workarounds(dev_priv);
 
 	i915_gem_init_swizzling(dev_priv);
 
@@ -5677,6 +5677,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 	i915_gem_contexts_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	intel_wa_list_free(&dev_priv->gt_wa_list);
+
 	intel_cleanup_gt_powersave(dev_priv);
 
 	intel_uc_fini_misc(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..cdfa6b21cbff 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -720,6 +720,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	__intel_context_unpin(i915->kernel_context, engine);
 
 	i915_timeline_fini(&engine->timeline);
+
+	intel_wa_list_free(&engine->wa_list);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 37c94a54efcb..4acb24c90c68 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1793,6 +1793,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
+	intel_engine_apply_workarounds(engine);
+
 	intel_mocs_init_engine(engine);
 
 	intel_engine_reset_breadcrumbs(engine);
@@ -2491,6 +2493,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 			  ret);
 	}
 
+	intel_engine_init_workarounds(engine);
+
 	return 0;
 
 err_cleanup_common:
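
Between them, the two intel_lrc.c hunks give engine workarounds the same build-once/apply-many shape as the GT list: logical_render_ring_init() records the list a single time, and gen8_init_common_ring() replays it on every engine (re)initialization, including the bring-up that follows a GPU reset. A tiny ordering sketch, with stand-in function bodies rather than driver code:

    #include <stdio.h>

    static void engine_init_workarounds(void)  { puts("record engine wa_list (once, at ring init)"); }
    static void engine_apply_workarounds(void) { puts("replay wa_list (every engine init, incl. after reset)"); }

    int main(void)
    {
            engine_init_workarounds();            /* logical_render_ring_init()     */

            for (int i = 0; i < 2; i++)           /* first init, then e.g. a reset  */
                    engine_apply_workarounds();   /* gen8_init_common_ring()        */

            return 0;
    }
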
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..04c61307f4d1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -15,6 +15,7 @@
 #include "i915_selftest.h"
 #include "i915_timeline.h"
 #include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
 
 struct drm_printer;
 struct i915_sched_attr;
@@ -440,6 +441,7 @@ struct intel_engine_cs {
 
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
+	struct i915_wa_list wa_list;
 	struct i915_vma *scratch;
 
 	u32             irq_keep_mask; /* always keep these interrupts */
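
With this hunk, struct intel_engine_cs carries two flavours of workaround state side by side: the existing wa_ctx, whose entries are emitted through a request by intel_ctx_workarounds_emit(), and the new wa_list, which is recorded once and written straight to registers by intel_engine_apply_workarounds(). A comment-style summary of that split, inferred from the intel_workarounds.h prototypes further down rather than taken from the patch text:

    /*
     * Per-engine workaround state after this series:
     *
     *   wa_ctx  - context workarounds; applied by emitting register writes
     *             into a request via intel_ctx_workarounds_emit(rq).
     *   wa_list - MMIO workarounds; saved as {reg, mask, val} entries and
     *             applied directly by intel_engine_apply_workarounds(engine).
     */
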
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..6e580891db96 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,6 +48,20 @@
  * - Public functions to init or apply the given workaround type.
  */
 
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
+{
+	wal->name = name;
+}
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+	if (!wal->count)
+		return;
+
+	DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+			 wal->count, wal->name);
+}
+
 static void wa_add(struct drm_i915_private *i915,
 		   i915_reg_t reg, const u32 mask, const u32 val)
 {
@@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
 	return 0;
 }
 
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+	const unsigned int grow = 1 << 4;
+
+	GEM_BUG_ON(!is_power_of_2(grow));
+
+	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+		struct i915_wa *list;
+
+		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+				     GFP_KERNEL);
+		if (!list) {
+			DRM_ERROR("No space for workaround init!\n");
+			return;
+		}
+
+		if (wal->list)
+			memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+		wal->list = list;
+	}
+
+	wal->list[wal->count++] = *wa;
+}
+
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = val,
+		.val = _MASKED_BIT_ENABLE(val)
+	};
+
+	wal_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+		   u32 val)
 {
+	struct i915_wa wa = {
+		.reg = reg,
+		.mask = mask,
+		.val = val
+	};
+
+	wal_add(wal, &wa);
 }
 
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
+	wa_write_masked_or(wal, reg, ~0, val);
 }
 
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
-	/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
-		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+	wa_write_masked_or(wal, reg, val, val);
+}
 
-	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
 	/* WaDisableKillLogic:bxt,skl,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   ECOCHK_DIS_TLB);
+	if (!IS_COFFEELAKE(i915))
+		wa_write_or(wal,
+			    GAM_ECOCHK,
+			    ECOCHK_DIS_TLB);
 
-	if (HAS_LLC(dev_priv)) {
+	if (HAS_LLC(i915)) {
 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
 		 *
 		 * Must match Display Engine. See
 		 * WaCompressedResourceDisplayNewHashMode.
 		 */
-		I915_WRITE(MMCD_MISC_CTRL,
-			   I915_READ(MMCD_MISC_CTRL) |
-			   MMCD_PCLA |
-			   MMCD_HOTSPOT_EN);
+		wa_write_or(wal,
+			    MMCD_MISC_CTRL,
+			    MMCD_PCLA | MMCD_HOTSPOT_EN);
 	}
 
 	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-		   BDW_DISABLE_HDC_INVALIDATION);
-
-	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
-	if (IS_GEN9_LP(dev_priv)) {
-		u32 val = I915_READ(GEN8_L3SQCREG1);
-
-		val &= ~L3_PRIO_CREDITS_MASK;
-		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-		I915_WRITE(GEN8_L3SQCREG1, val);
-	}
-
-	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	wa_write_or(wal,
+		    GAM_ECOCHK,
+		    BDW_DISABLE_HDC_INVALIDATION);
 }
 
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:skl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableGafsUnitClkGating:skl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-			   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+		wa_write_or(wal,
+			    GEN9_GAMT_ECO_REG_RW_IA,
+			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaDisablePooledEuLoadBalancingFix:bxt */
-	I915_WRITE(FF_SLICE_CS_CHICKEN2,
-		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+	gen9_gt_workarounds_init(i915);
 
 	/* WaInPlaceDecompressionHang:bxt */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:kbl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableDynamicCreditSharing:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
 
 	/* WaDisableGafsUnitClkGating:kbl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:kbl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaKBLVECSSemaphoreWaitPoll:kbl */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
-		struct intel_engine_cs *engine;
-		unsigned int tmp;
-
-		for_each_engine(engine, dev_priv, tmp) {
-			if (engine->id == RCS)
-				continue;
-
-			I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
-		}
-	}
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	gen9_gt_workarounds_init(i915);
 }
 
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	gen9_gt_workarounds_apply(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* WaEnableGapsTsvCreditFix:cfl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+	gen9_gt_workarounds_init(i915);
 
 	/* WaDisableGafsUnitClkGating:cfl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	wa_write_or(wal,
+		    GEN7_UCGCTL4,
+		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaInPlaceDecompressionHang:cfl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
 static void wa_init_mcr(struct drm_i915_private *dev_priv)
 {
 	const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
-	u32 mcr;
+	struct i915_wa_list *wal = &dev_priv->gt_wa_list;
 	u32 mcr_slice_subslice_mask;
 
 	/*
@@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 		WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
 	}
 
-	mcr = I915_READ(GEN8_MCR_SELECTOR);
-
 	if (INTEL_GEN(dev_priv) >= 11)
 		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
 					  GEN11_MCR_SUBSLICE_MASK;
@@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
 	 * occasions, such as INSTDONE, where this value is dependent
 	 * on s/ss combo, the read should be done with read_subslice_reg.
 	 */
-	mcr &= ~mcr_slice_subslice_mask;
-	mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
-	I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+	wa_write_masked_or(wal,
+			   GEN8_MCR_SELECTOR,
+			   mcr_slice_subslice_mask,
+			   intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	wa_init_mcr(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_mcr(i915);
 
 	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+		wa_write_or(wal,
+			    GAMT_CHKN_BIT_REG,
+			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
 
 	/* WaInPlaceDecompressionHang:cnl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaEnablePreemptionGranularityControlByUMD:cnl */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-	wa_init_mcr(dev_priv);
+	struct i915_wa_list *wal = &i915->gt_wa_list;
 
-	/* This is not an Wa. Enable for better image quality */
-	I915_WRITE(_3D_CHICKEN3,
-		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+	wa_init_mcr(i915);
 
 	/* WaInPlaceDecompressionHang:icl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-					    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaPipelineFlushCoherentLines:icl */
-	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-				   GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-	/* Wa_1405543622:icl
-	 * Formerly known as WaGAPZPriorityScheme
-	 */
-	I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
-				  GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
-	/* Wa_1604223664:icl
-	 * Formerly known as WaL3BankAddressHashing
-	 */
-	I915_WRITE(GEN8_GARBCNTL,
-		   (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
-		   GEN11_HASH_CTRL_EXCL_BIT0);
-	I915_WRITE(GEN11_GLBLINVL,
-		   (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
-		   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+	wa_write_or(wal,
+		    GEN9_GAMT_ECO_REG_RW_IA,
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
 	/* WaModifyGamTlbPartitioning:icl */
-	I915_WRITE(GEN11_GACB_PERF_CTRL,
-		   (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
-		   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
-	/* Wa_1405733216:icl
-	 * Formerly known as WaDisableCleanEvicts
-	 */
-	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-				   GEN11_LQSC_CLEAN_EVICT_DISABLE);
+	wa_write_masked_or(wal,
+			   GEN11_GACB_PERF_CTRL,
+			   GEN11_HASH_CTRL_MASK,
+			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
 
 	/* Wa_1405766107:icl
 	 * Formerly known as WaCL2SFHalfMaxAlloc
 	 */
-	I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
-				      GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
-				      GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+	wa_write_or(wal,
+		    GEN11_LSN_UNSLCVC,
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
 
 	/* Wa_220166154:icl
 	 * Formerly known as WaDisCtxReload
 	 */
-	I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
-					   GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+	wa_write_or(wal,
+		    GEN8_GAMW_ECO_DEV_RW_IA,
+		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 
 	/* Wa_1405779004:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
-		I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
-			   I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
-			   MSCUNIT_CLKGATE_DIS);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+		wa_write_or(wal,
+			    SLICE_UNIT_LEVEL_CLKGATE,
+			    MSCUNIT_CLKGATE_DIS);
 
 	/* Wa_1406680159:icl */
-	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
-		   I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
-		   GWUNIT_CLKGATE_DIS);
-
-	/* Wa_1604302699:icl */
-	I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
-		   I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
-		   GEN11_I2M_WRITE_DISABLE);
+	wa_write_or(wal,
+		    SUBSLICE_UNIT_LEVEL_CLKGATE,
+		    GWUNIT_CLKGATE_DIS);
 
 	/* Wa_1406838659:icl (pre-prod) */
-	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
-		I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
-			   I915_READ(INF_UNIT_LEVEL_CLKGATE) |
-			   CGPSF_CLKGATE_DIS);
-
-	/* WaForwardProgressSoftReset:icl */
-	I915_WRITE(GEN10_SCRATCH_LNCF2,
-		   I915_READ(GEN10_SCRATCH_LNCF2) |
-		   PMFLUSHDONE_LNICRSDROP |
-		   PMFLUSH_GAPL3UNBLOCK |
-		   PMFLUSHDONE_LNEBLK);
+	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+		wa_write_or(wal,
+			    INF_UNIT_LEVEL_CLKGATE,
+			    CGPSF_CLKGATE_DIS);
 
 	/* Wa_1406463099:icl
 	 * Formerly known as WaGamTlbPendError
 	 */
-	I915_WRITE(GAMT_CHKN_BIT_REG,
-		   I915_READ(GAMT_CHKN_BIT_REG) |
-		   GAMT_CHKN_DISABLE_L3_COH_PIPE);
+	wa_write_or(wal,
+		    GAMT_CHKN_BIT_REG,
+		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
 {
-	if (INTEL_GEN(dev_priv) < 8)
+	struct i915_wa_list *wal = &i915->gt_wa_list;
+
+	wa_init_start(wal, "GT");
+
+	if (INTEL_GEN(i915) < 8)
 		return;
-	else if (IS_BROADWELL(dev_priv))
-		bdw_gt_workarounds_apply(dev_priv);
-	else if (IS_CHERRYVIEW(dev_priv))
-		chv_gt_workarounds_apply(dev_priv);
-	else if (IS_SKYLAKE(dev_priv))
-		skl_gt_workarounds_apply(dev_priv);
-	else if (IS_BROXTON(dev_priv))
-		bxt_gt_workarounds_apply(dev_priv);
-	else if (IS_KABYLAKE(dev_priv))
-		kbl_gt_workarounds_apply(dev_priv);
-	else if (IS_GEMINILAKE(dev_priv))
-		glk_gt_workarounds_apply(dev_priv);
-	else if (IS_COFFEELAKE(dev_priv))
-		cfl_gt_workarounds_apply(dev_priv);
-	else if (IS_CANNONLAKE(dev_priv))
-		cnl_gt_workarounds_apply(dev_priv);
-	else if (IS_ICELAKE(dev_priv))
-		icl_gt_workarounds_apply(dev_priv);
+	else if (IS_BROADWELL(i915))
+		return;
+	else if (IS_CHERRYVIEW(i915))
+		return;
+	else if (IS_SKYLAKE(i915))
+		skl_gt_workarounds_init(i915);
+	else if (IS_BROXTON(i915))
+		bxt_gt_workarounds_init(i915);
+	else if (IS_KABYLAKE(i915))
+		kbl_gt_workarounds_init(i915);
+	else if (IS_GEMINILAKE(i915))
+		glk_gt_workarounds_init(i915);
+	else if (IS_COFFEELAKE(i915))
+		cfl_gt_workarounds_init(i915);
+	else if (IS_CANNONLAKE(i915))
+		cnl_gt_workarounds_init(i915);
+	else if (IS_ICELAKE(i915))
+		icl_gt_workarounds_init(i915);
 	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
+		MISSING_CASE(INTEL_GEN(i915));
+
+	wa_init_finish(wal);
+}
+
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+		   const struct i915_wa_list *wal)
+{
+	enum forcewake_domains fw = 0;
+	struct i915_wa *wa;
+	unsigned int i;
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+		fw |= intel_uncore_forcewake_for_reg(dev_priv,
+						     wa->reg,
+						     FW_REG_READ |
+						     FW_REG_WRITE);
+
+	return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+{
+	enum forcewake_domains fw;
+	unsigned long flags;
+	struct i915_wa *wa;
+	unsigned int i;
+
+	if (!wal->count)
+		return;
+
+	fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+	intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+		u32 val = I915_READ_FW(wa->reg);
+
+		val &= ~wa->mask;
+		val |= wa->val;
+
+		I915_WRITE_FW(wa->reg, val);
+	}
+
+	intel_uncore_forcewake_put__locked(dev_priv, fw);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+	DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+	wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
 }
 
 struct whitelist {
@@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
 	whitelist_apply(engine, whitelist_build(engine, &w));
 }
 
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->wa_list;
+
+	if (IS_ICELAKE(i915)) {
+		/* This is not an Wa. Enable for better image quality */
+		wa_masked_en(wal,
+			     _3D_CHICKEN3,
+			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+		/* WaPipelineFlushCoherentLines:icl */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+		/*
+		 * Wa_1405543622:icl
+		 * Formerly known as WaGAPZPriorityScheme
+		 */
+		wa_write_or(wal,
+			    GEN8_GARBCNTL,
+			    GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+		/*
+		 * Wa_1604223664:icl
+		 * Formerly known as WaL3BankAddressHashing
+		 */
+		wa_write_masked_or(wal,
+				   GEN8_GARBCNTL,
+				   GEN11_HASH_CTRL_EXCL_MASK,
+				   GEN11_HASH_CTRL_EXCL_BIT0);
+		wa_write_masked_or(wal,
+				   GEN11_GLBLINVL,
+				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
+				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+		/*
+		 * Wa_1405733216:icl
+		 * Formerly known as WaDisableCleanEvicts
+		 */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+		/* Wa_1604302699:icl */
+		wa_write_or(wal,
+			    GEN10_L3_CHICKEN_MODE_REGISTER,
+			    GEN11_I2M_WRITE_DISABLE);
+
+		/* WaForwardProgressSoftReset:icl */
+		wa_write_or(wal,
+			    GEN10_SCRATCH_LNCF2,
+			    PMFLUSHDONE_LNICRSDROP |
+			    PMFLUSH_GAPL3UNBLOCK |
+			    PMFLUSHDONE_LNEBLK);
+	}
+
+	if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+		/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+		wa_masked_en(wal,
+			     GEN7_FF_SLICE_CS_CHICKEN1,
+			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+	}
+
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+		wa_write_or(wal,
+			    GEN8_GARBCNTL,
+			    GEN9_GAPS_TSV_CREDIT_DISABLE);
+	}
+
+	if (IS_BROXTON(i915)) {
+		/* WaDisablePooledEuLoadBalancingFix:bxt */
+		wa_masked_en(wal,
+			     FF_SLICE_CS_CHICKEN2,
+			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+	}
+
+	if (IS_GEN9(i915)) {
+		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+		wa_masked_en(wal,
+			     GEN9_CSFE_CHICKEN1_RCS,
+			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+		wa_write_or(wal,
+			    BDW_SCRATCH1,
+			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+		if (IS_GEN9_LP(i915))
+			wa_write_masked_or(wal,
+					   GEN8_L3SQCREG1,
+					   L3_PRIO_CREDITS_MASK,
+					   L3_GENERAL_PRIO_CREDITS(62) |
+					   L3_HIGH_PRIO_CREDITS(2));
+
+		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+		wa_write_or(wal,
+			    GEN8_L3SQCREG4,
+			    GEN8_LQSC_FLUSH_COHERENT_LINES);
+	}
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	struct i915_wa_list *wal = &engine->wa_list;
+
+	/* WaKBLVECSSemaphoreWaitPoll:kbl */
+	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+		wa_write(wal,
+			 RING_SEMA_WAIT_POLL(engine->mmio_base),
+			 1);
+	}
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct i915_wa_list *wal = &engine->wa_list;
+
+	if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+		return;
+
+	wa_init_start(wal, engine->name);
+
+	if (engine->id == RCS)
+		rcs_engine_wa_init(engine);
+	else
+		xcs_engine_wa_init(engine);
+
+	wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+	wa_list_apply(engine->i915, &engine->wa_list);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/intel_workarounds.c"
 #endif
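
The core of the rewrite is visible in the intel_workarounds.c hunks above: each workaround becomes a { reg, mask, val } triple, wal_add() grows the backing array in blocks of 16, and wa_list_apply() later replays the list as one read-modify-write per register under forcewake and the uncore lock. The following self-contained userspace model covers just those two loops; the "registers" are a plain array, realloc() stands in for the kmalloc_array()/memcpy() grow step, and the forcewake and locking details are left out:

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_wa { unsigned int reg, mask, val; };
    struct demo_wa_list { struct demo_wa *list; unsigned int count; };

    static void demo_wal_add(struct demo_wa_list *wal, struct demo_wa wa)
    {
            const unsigned int grow = 1 << 4;            /* same block size as wal_add() */

            if ((wal->count % grow) == 0) {              /* uninitialized or block full */
                    struct demo_wa *list;

                    list = realloc(wal->list, (wal->count + grow) * sizeof(wa));
                    if (!list)
                            return;                      /* drop the entry, as the driver does */
                    wal->list = list;
            }
            wal->list[wal->count++] = wa;
    }

    static void demo_wa_list_apply(unsigned int *regs, const struct demo_wa_list *wal)
    {
            for (unsigned int i = 0; i < wal->count; i++) {
                    const struct demo_wa *wa = &wal->list[i];

                    /* same read-modify-write as wa_list_apply() */
                    regs[wa->reg] = (regs[wa->reg] & ~wa->mask) | wa->val;
            }
    }

    int main(void)
    {
            unsigned int regs[4] = { 0 };
            struct demo_wa_list wal = { 0 };

            demo_wal_add(&wal, (struct demo_wa){ .reg = 2, .mask = 0xf, .val = 0x5 });
            demo_wa_list_apply(regs, &wal);
            printf("regs[2]=%#x count=%u\n", regs[2], wal.count); /* regs[2]=0x5 count=1 */

            free(wal.list);
            return 0;
    }
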
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..979695a53964 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,35 @@
 #ifndef _I915_WORKAROUNDS_H_
 #define _I915_WORKAROUNDS_H_
 
+#include <linux/slab.h>
+
+struct i915_wa {
+	i915_reg_t	  reg;
+	u32		  mask;
+	u32		  val;
+};
+
+struct i915_wa_list {
+	const char	*name;
+	struct i915_wa	*list;
+	unsigned int	count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+	kfree(wal->list);
+	memset(wal, 0, sizeof(*wal));
+}
+
 int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
 int intel_ctx_workarounds_emit(struct i915_request *rq);
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
 
 void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
 
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+
 #endif
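
Taken together, the header now exposes a small init/apply/free surface at both device and engine scope. For orientation, this is how the new entry points line up with their call sites in the hunks above (paraphrased, not a compilable unit):

    /*
     * Device (GT) scope:
     *   i915_driver_init_hw()         -> intel_gt_init_workarounds(i915)
     *   i915_gem_init_hw()            -> intel_gt_apply_workarounds(i915)
     *   i915_gem_fini()               -> intel_wa_list_free(&i915->gt_wa_list)
     *
     * Engine scope:
     *   logical_render_ring_init()    -> intel_engine_init_workarounds(engine)
     *   gen8_init_common_ring()       -> intel_engine_apply_workarounds(engine)
     *   intel_engine_cleanup_common() -> intel_wa_list_free(&engine->wa_list)
     */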