diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index d7ffa91f97532eda73e398904099b8e3b6a93787..c8e9e273f466a7433f2fca0c3c9aa360c90a759b 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -104,13 +104,9 @@ typedef struct {
 
 #define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
 
-#ifdef I915
 typedef struct {
 	u32 reg;
 } i915_mcr_reg_t;
-#else
-#define i915_mcr_reg_t i915_reg_t
-#endif
 
 #define INVALID_MMIO_REG _MMIO(0)
 
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 7e9cd79c38ab5c2ed0bd2916feadcb8ef344a8c0..e168632d7d76019f5bdf3318b9199c881f12e1d9 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -83,6 +83,7 @@ xe-y += xe_bb.o \
 	xe_pt.o \
 	xe_query.o \
 	xe_reg_sr.o \
+	xe_reg_whitelist.o \
 	xe_rtp.o \
 	xe_ring_ops.o \
 	xe_sa.o \
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 36b5da2c5977f1a2933453e486d47a91e50cda48..20dbc08d368556175fcda9438083dbc8182bc48c 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -500,6 +500,7 @@ static int all_fw_domain_init(struct xe_gt *gt)
 	if (err)
 		goto err_hw_fence_irq;
 
+	xe_gt_mcr_set_implicit_defaults(gt);
 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
 
 	err = xe_gt_clock_init(gt);
@@ -633,6 +634,7 @@ static int do_gt_restart(struct xe_gt *gt)
 
 	setup_private_ppat(gt);
 
+	xe_gt_mcr_set_implicit_defaults(gt);
 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
 
 	err = xe_wopcm_init(&gt->uc.wopcm);
@@ -653,7 +655,8 @@ static int do_gt_restart(struct xe_gt *gt)
 
 	for_each_hw_engine(hwe, gt, id) {
 		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
-		xe_reg_whitelist_apply(hwe);
+		xe_reg_sr_apply_whitelist(&hwe->reg_whitelist,
+					  hwe->mmio_base, gt);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index ddce2c41c7f51325b619310f13d6620c3fcee1ba..bb71071c3435fe0ffabe3189b6ff250717805736 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -23,12 +23,12 @@
  *
  * MMIO accesses to MCR registers are controlled according to the settings
  * programmed in the platform's MCR_SELECTOR register(s).  MMIO writes to MCR
- * registers can be done in either a (i.e., a single write updates all
+ * registers can be done in either multicast (a single write updates all
  * instances of the register to the same value) or unicast (a write updates only
- * one specific instance).  Reads of MCR registers always operate in a unicast
- * manner regardless of how the multicast/unicast bit is set in MCR_SELECTOR.
- * Selection of a specific MCR instance for unicast operations is referred to
- * as "steering."
+ * one specific instance) form.  Reads of MCR registers always operate in a
+ * unicast manner regardless of how the multicast/unicast bit is set in
+ * MCR_SELECTOR.  Selection of a specific MCR instance for unicast operations is
+ * referred to as "steering."
  *
  * If MCR register operations are steered toward a hardware unit that is
  * fused off or currently powered down due to power gating, the MMIO operation
@@ -155,15 +155,13 @@ static const struct xe_mmio_range xelpmp_oaddrm_steering_table[] = {
 	{},
 };
 
-/*
- * DG2 GAM registers are a special case; this table is checked directly in
- * xe_gt_mcr_get_nonterminated_steering and is not hooked up via
- * gt->steering[].
- */
-static const struct xe_mmio_range dg2_gam_ranges[] = {
-	{ 0x004000, 0x004AFF },
-	{ 0x00C800, 0x00CFFF },
-	{ 0x00F000, 0x00FFFF },
+static const struct xe_mmio_range dg2_implicit_steering_table[] = {
+	{ 0x000B00, 0x000BFF },		/* SF (SQIDI replication) */
+	{ 0x001000, 0x001FFF },		/* SF (SQIDI replication) */
+	{ 0x004000, 0x004AFF },		/* GAM (MSLICE replication) */
+	{ 0x008700, 0x0087FF },		/* MCFG (SQIDI replication) */
+	{ 0x00C800, 0x00CFFF },		/* GAM (MSLICE replication) */
+	{ 0x00F000, 0x00FFFF },		/* GAM (MSLICE replication) */
 	{},
 };
 
@@ -249,18 +247,20 @@ static const struct {
 	const char *name;
 	void (*init)(struct xe_gt *);
 } xe_steering_types[] = {
-	{ "L3BANK",	init_steering_l3bank },
-	{ "MSLICE",	init_steering_mslice },
-	{ "LNCF",	NULL },		/* initialized by mslice init */
-	{ "DSS",	init_steering_dss },
-	{ "OADDRM",	init_steering_oaddrm },
-	{ "INSTANCE 0",	init_steering_inst0 },
+	[L3BANK] =	{ "L3BANK",	init_steering_l3bank },
+	[MSLICE] =	{ "MSLICE",	init_steering_mslice },
+	[LNCF] =	{ "LNCF",	NULL }, /* initialized by mslice init */
+	[DSS] =		{ "DSS",	init_steering_dss },
+	[OADDRM] =	{ "OADDRM",	init_steering_oaddrm },
+	[INSTANCE0] =	{ "INSTANCE 0",	init_steering_inst0 },
+	[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
 };
 
 void xe_gt_mcr_init(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);
 
+	BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
 	BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);
 
 	spin_lock_init(&gt->mcr_lock);
@@ -280,6 +280,7 @@ void xe_gt_mcr_init(struct xe_gt *gt)
 		gt->steering[MSLICE].ranges = xehp_mslice_steering_table;
 		gt->steering[LNCF].ranges = xehp_lncf_steering_table;
 		gt->steering[DSS].ranges = xehp_dss_steering_table;
+		gt->steering[IMPLICIT_STEERING].ranges = dg2_implicit_steering_table;
 	} else {
 		gt->steering[L3BANK].ranges = xelp_l3bank_steering_table;
 		gt->steering[DSS].ranges = xelp_dss_steering_table;
@@ -291,6 +292,33 @@ void xe_gt_mcr_init(struct xe_gt *gt)
 			xe_steering_types[i].init(gt);
 }
 
+/**
+ * xe_gt_mcr_set_implicit_defaults - Initialize steer control registers
+ * @gt: GT structure
+ *
+ * Some register ranges don't need to have their steering control registers
+ * changed on each access - it's sufficient to set them once on initialization.
+ * This function sets those registers once, for the platforms that need it.
+ */
+void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
+{
+	struct xe_device *xe = gt_to_xe(gt);
+
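+	/* So far only DG2 needs this one-time selector setup */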
+	if (xe->info.platform == XE_DG2) {
+		u32 steer_val = REG_FIELD_PREP(GEN11_MCR_SLICE_MASK, 0) |
+			REG_FIELD_PREP(GEN11_MCR_SUBSLICE_MASK, 2);
+
+		xe_mmio_write32(gt, MCFG_MCR_SELECTOR.reg, steer_val);
+		xe_mmio_write32(gt, SF_MCR_SELECTOR.reg, steer_val);
+		/*
+		 * For GAM registers, all reads should be directed to instance 1
+		 * (unicast reads against other instances are not allowed),
+		 * and instance 1 is already the hardware's default steering
+		 * target, which we never change.
+		 */
+	}
+}
+
 /*
  * xe_gt_mcr_get_nonterminated_steering - find group/instance values that
  *    will steer a register to a non-terminated instance
@@ -305,14 +333,15 @@ void xe_gt_mcr_init(struct xe_gt *gt)
  * steering.
  *
  * Returns true if the caller should steer to the @group/@instance values
- * returned.  Returns false if the caller need not perform any steering (i.e.,
- * the DG2 GAM range special case).
+ * returned.  Returns false if the caller need not perform any steering.
  */
 static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
 						 i915_mcr_reg_t reg,
 						 u8 *group, u8 *instance)
 {
-	for (int type = 0; type < NUM_STEERING_TYPES; type++) {
+	const struct xe_mmio_range *implicit_ranges;
+
+	for (int type = 0; type < IMPLICIT_STEERING; type++) {
 		if (!gt->steering[type].ranges)
 			continue;
 
@@ -325,27 +354,15 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
 		}
 	}
 
-	/*
-	 * All MCR registers should usually be part of one of the steering
-	 * ranges we're tracking.  However there's one special case:  DG2
-	 * GAM registers are technically multicast registers, but are special
-	 * in a number of ways:
-	 *  - they have their own dedicated steering control register (they
-	 *    don't share 0xFDC with other MCR classes)
-	 *  - all reads should be directed to instance 1 (unicast reads against
-	 *    other instances are not allowed), and instance 1 is already the
-	 *    the hardware's default steering target, which we never change
-	 *
-	 * Ultimately this means that we can just treat them as if they were
-	 * unicast registers and all operations will work properly.
-	 */
-	for (int i = 0; dg2_gam_ranges[i].end > 0; i++)
-		if (xe_mmio_in_range(&dg2_gam_ranges[i], reg.reg))
-			return false;
+	implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges;
+	if (implicit_ranges)
+		for (int i = 0; implicit_ranges[i].end > 0; i++)
+			if (xe_mmio_in_range(&implicit_ranges[i], reg.reg))
+				return false;
 
 	/*
-	 * Not found in a steering table and not a DG2 GAM register?  We'll
-	 * just steer to 0/0 as a guess and raise a warning.
+	 * Not found in a steering table and not a register with implicit
+	 * steering. Just steer to 0/0 as a guess and raise a warning.
 	 */
 	drm_WARN(&gt_to_xe(gt)->drm, true,
 		 "Did not find MCR register %#x in any MCR steering table\n",
@@ -467,7 +484,6 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, i915_mcr_reg_t reg)
 					   group, instance, 0);
 		mcr_unlock(gt);
 	} else {
-		/* DG2 GAM special case rules; treat as if unicast */
 		val = xe_mmio_read32(gt, reg.reg);
 	}
 
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index 62ec6eb654a0b701e5c3caf869f8970e03944a81..c31987d2177c9cfd7684fdef07b845b6ffae0875 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -13,6 +13,8 @@ struct xe_gt;
 
 void xe_gt_mcr_init(struct xe_gt *gt);
 
+void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt);
+
 u32 xe_gt_mcr_unicast_read(struct xe_gt *gt, i915_mcr_reg_t reg,
 			   int group, int instance);
 u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, i915_mcr_reg_t reg);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index a40fab262ac9e40ffde334aa91b764f8e4384563..b01edd3fdc4d51df244af94b901ea68ddf076147 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -66,6 +66,13 @@ enum xe_steering_type {
 	 */
 	INSTANCE0,
 
+	/*
+	 * Register ranges that don't need the steering selector programmed on
+	 * every access: it's sufficient to keep the HW default or to program
+	 * the selector only once, during GT initialization. This needs to be
+	 * the last steering type.
+	 */
+	IMPLICIT_STEERING,
 	NUM_STEERING_TYPES
 };
 
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1536d7d024a28b81a35d14064c9ddbae24783871..fd89dd90131c9477316872273ce7c3394e7b0fca 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -385,7 +385,7 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
 	XE_BUG_ON(!(gt->info.engine_mask & BIT(id)));
 
 	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
-	xe_reg_whitelist_apply(hwe);
+	xe_reg_sr_apply_whitelist(&hwe->reg_whitelist, hwe->mmio_base, gt);
 
 	hwe->hwsp = xe_bo_create_locked(xe, gt, NULL, SZ_4K, ttm_bo_type_kernel,
 					XE_BO_CREATE_VRAM_IF_DGFX(gt) |
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 8a44d140fbff248832b70362068c4b9dd4373883..f7eceb84e647a36bf2298a56fef33366f9b18c27 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -12,13 +12,16 @@
 #include <drm/drm_print.h>
 #include <drm/drm_managed.h>
 
+#include "xe_rtp_types.h"
 #include "xe_device_types.h"
 #include "xe_force_wake.h"
 #include "xe_gt.h"
+#include "xe_gt_mcr.h"
 #include "xe_macros.h"
 #include "xe_mmio.h"
 
 #include "gt/intel_engine_regs.h"
+#include "gt/intel_gt_regs.h"
 
 #define XE_REG_SR_GROW_STEP_DEFAULT	16
 
@@ -59,7 +62,7 @@ int xe_reg_sr_dump_kv(struct xe_reg_sr *sr,
 
 	iter = *dst;
 	xa_for_each(&sr->xa, idx, entry) {
-		iter->k = _MMIO(idx);
+		iter->k = idx;
 		iter->v = *entry;
 		iter++;
 	}
@@ -100,13 +103,16 @@ static bool compatible_entries(const struct xe_reg_sr_entry *e1,
 	if (e1->masked_reg != e2->masked_reg)
 		return false;
 
+	if (e1->reg_type != e2->reg_type)
+		return false;
+
 	return true;
 }
 
-int xe_reg_sr_add(struct xe_reg_sr *sr, i915_reg_t reg,
+int xe_reg_sr_add(struct xe_reg_sr *sr, u32 reg,
 		  const struct xe_reg_sr_entry *e)
 {
-	unsigned long idx = i915_mmio_reg_offset(reg);
+	unsigned long idx = reg;
 	struct xe_reg_sr_entry *pentry = xa_load(&sr->xa, idx);
 	int ret;
 
@@ -161,7 +167,9 @@ static void apply_one_mmio(struct xe_gt *gt, u32 reg,
 	if (entry->masked_reg)
 		val = (entry->clr_bits ?: entry->set_bits << 16);
 	else if (entry->clr_bits + 1)
-		val = xe_mmio_read32(gt, reg) & (~entry->clr_bits);
+		val = (entry->reg_type == XE_RTP_REG_MCR ?
+		       xe_gt_mcr_unicast_read_any(gt, MCR_REG(reg)) :
+		       xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
 	else
 		val = 0;
 
@@ -173,7 +181,11 @@ static void apply_one_mmio(struct xe_gt *gt, u32 reg,
 	val |= entry->set_bits;
 
 	drm_dbg(&xe->drm, "REG[0x%x] = 0x%08x", reg, val);
-	xe_mmio_write32(gt, reg, val);
+
+	if (entry->reg_type == XE_RTP_REG_MCR)
+		xe_gt_mcr_multicast_write(gt, MCR_REG(reg), val);
+	else
+		xe_mmio_write32(gt, reg, val);
 }
 
 void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
@@ -200,3 +212,38 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
 err_force_wake:
 	drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
 }
+
+void xe_reg_sr_apply_whitelist(struct xe_reg_sr *sr, u32 mmio_base,
+			       struct xe_gt *gt)
+{
+	struct xe_device *xe = gt_to_xe(gt);
+	struct xe_reg_sr_entry *entry;
+	unsigned long reg;
+	unsigned int slot = 0;
+	int err;
+
+	drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);
+
+	err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
+	if (err)
+		goto err_force_wake;
+
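+	/* Each whitelisted register consumes one RING_FORCE_TO_NONPRIV slot */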
+	xa_for_each(&sr->xa, reg, entry) {
+		xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot).reg,
+				reg | entry->set_bits);
+		slot++;
+	}
+
+	/* And clear the rest just in case of garbage */
+	for (; slot < RING_MAX_NONPRIV_SLOTS; slot++)
+		xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot).reg,
+				RING_NOPID(mmio_base).reg);
+
+	err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
+	XE_WARN_ON(err);
+
+	return;
+
+err_force_wake:
+	drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
+}
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.h b/drivers/gpu/drm/xe/xe_reg_sr.h
index 0a15fc4e088ff3b8406c73dbf4d4306d200b3912..c3a9db251e925109bf95ac0e60f6ebfd8211be1f 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.h
+++ b/drivers/gpu/drm/xe/xe_reg_sr.h
@@ -6,7 +6,6 @@
 #ifndef _XE_REG_SR_
 #define _XE_REG_SR_
 
-#include "i915_reg_defs.h"
 #include "xe_reg_sr_types.h"
 
 /*
@@ -20,8 +19,10 @@ int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
 int xe_reg_sr_dump_kv(struct xe_reg_sr *sr,
 		      struct xe_reg_sr_kv **dst);
 
-int xe_reg_sr_add(struct xe_reg_sr *sr, i915_reg_t reg,
+int xe_reg_sr_add(struct xe_reg_sr *sr, u32 reg,
 		  const struct xe_reg_sr_entry *e);
 void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt);
+void xe_reg_sr_apply_whitelist(struct xe_reg_sr *sr, u32 mmio_base,
+			       struct xe_gt *gt);
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_reg_sr_types.h b/drivers/gpu/drm/xe/xe_reg_sr_types.h
index 6c927d350ff646ad38724d7cf98f5444f44b9a78..2fa7ff3966ba778015f95e378dcf59956acbbb21 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr_types.h
+++ b/drivers/gpu/drm/xe/xe_reg_sr_types.h
@@ -21,11 +21,12 @@ struct xe_reg_sr_entry {
 	 * bits as a mask for the bits that is being updated on the lower 16
 	 * bits when writing to it.
 	 */
-	bool		masked_reg;
+	u8		masked_reg;
+	u8		reg_type;
 };
 
 struct xe_reg_sr_kv {
-	i915_reg_t		k;
+	u32			k;
 	struct xe_reg_sr_entry	v;
 };
 
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
new file mode 100644
index 0000000000000000000000000000000000000000..a34617a642ec67b453f65594535470391f296e2a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_reg_whitelist.h"
+
+#include "xe_platform_types.h"
+#include "xe_gt_types.h"
+#include "xe_rtp.h"
+
+#include "../i915/gt/intel_engine_regs.h"
+#include "../i915/gt/intel_gt_regs.h"
+
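+/*
+ * Redefine the register helpers so registers from the i915 headers expand
+ * into the (offset, type) pairs expected by the RTP action macros.
+ */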
+#undef _MMIO
+#undef MCR_REG
+#define _MMIO(x)	_XE_RTP_REG(x)
+#define MCR_REG(x)	_XE_RTP_MCR_REG(x)
+
+static bool match_not_render(const struct xe_gt *gt,
+			     const struct xe_hw_engine *hwe)
+{
+	return hwe->class != XE_ENGINE_CLASS_RENDER;
+}
+
+static const struct xe_rtp_entry register_whitelist[] = {
+	{ XE_RTP_NAME("WaAllowPMDepthAndInvocationCountAccessFromUMD, 1408556865"),
+	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+	  XE_RTP_ACTIONS(WHITELIST(PS_INVOCATION_COUNT,
+				   RING_FORCE_TO_NONPRIV_ACCESS_RD |
+				   RING_FORCE_TO_NONPRIV_RANGE_4))
+	},
+	{ XE_RTP_NAME("1508744258, 14012131227, 1808121037"),
+	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+	  XE_RTP_ACTIONS(WHITELIST(GEN7_COMMON_SLICE_CHICKEN1, 0))
+	},
+	{ XE_RTP_NAME("1806527549"),
+	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+	  XE_RTP_ACTIONS(WHITELIST(HIZ_CHICKEN, 0))
+	},
+	{ XE_RTP_NAME("allow_read_ctx_timestamp"),
+	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1260), FUNC(match_not_render)),
+	  XE_RTP_ACTIONS(WHITELIST(RING_CTX_TIMESTAMP(0),
+				RING_FORCE_TO_NONPRIV_ACCESS_RD,
+				XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+	},
+	{ XE_RTP_NAME("16014440446"),
+	  XE_RTP_RULES(PLATFORM(PVC)),
+	  XE_RTP_ACTIONS(WHITELIST(_MMIO(0x4400),
+				   RING_FORCE_TO_NONPRIV_DENY |
+				   RING_FORCE_TO_NONPRIV_RANGE_64),
+			 WHITELIST(_MMIO(0x4500),
+				   RING_FORCE_TO_NONPRIV_DENY |
+				   RING_FORCE_TO_NONPRIV_RANGE_64))
+	},
+	{}
+};
+
+/**
+ * xe_reg_whitelist_process_engine - process table of registers to whitelist
+ * @hwe: engine instance to process whitelist for
+ *
+ * Process whitelist table for this platform, saving in @hwe all the
+ * registers that need to be whitelisted by the hardware so they can be accessed
+ * by userspace.
+ */
+void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
+{
+	xe_rtp_process(register_whitelist, &hwe->reg_whitelist, hwe->gt, hwe);
+}
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.h b/drivers/gpu/drm/xe/xe_reg_whitelist.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e861b1bdb01ea189231de2b7a479100727afe97
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_REG_WHITELIST_
+#define _XE_REG_WHITELIST_
+
+struct xe_hw_engine;
+
+void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 934b2329c1e174840287856eb945a25bef1a37b4..5b1316b588d8117d4582cfcdd3faeb7fe8230857 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -86,22 +86,44 @@ static bool rule_matches(struct xe_gt *gt,
 	return true;
 }
 
-static void rtp_add_sr_entry(const struct xe_rtp_entry *entry,
+static void rtp_add_sr_entry(const struct xe_rtp_action *action,
 			     struct xe_gt *gt,
 			     u32 mmio_base,
 			     struct xe_reg_sr *sr)
 {
-	i915_reg_t reg = _MMIO(entry->regval.reg.reg + mmio_base);
+	u32 reg = action->reg + mmio_base;
 	struct xe_reg_sr_entry sr_entry = {
-		.clr_bits = entry->regval.clr_bits,
-		.set_bits = entry->regval.set_bits,
-		.read_mask = entry->regval.read_mask,
-		.masked_reg = entry->regval.flags & XE_RTP_FLAG_MASKED_REG,
+		.clr_bits = action->clr_bits,
+		.set_bits = action->set_bits,
+		.read_mask = action->read_mask,
+		.masked_reg = action->flags & XE_RTP_ACTION_FLAG_MASKED_REG,
+		.reg_type = action->reg_type,
 	};
 
 	xe_reg_sr_add(sr, reg, &sr_entry);
 }
 
+static void rtp_process_one(const struct xe_rtp_entry *entry, struct xe_gt *gt,
+			    struct xe_hw_engine *hwe, struct xe_reg_sr *sr)
+{
+	const struct xe_rtp_action *action;
+	u32 mmio_base;
+	unsigned int i = 0;
+
+	if (!rule_matches(gt, hwe, entry))
+		return;
+
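+	/* Use the engine's MMIO base only if the entry or action requires it */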
+	for (action = &entry->actions[0]; i < entry->n_actions; action++, i++) {
+		if ((entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) ||
+		    (action->flags & XE_RTP_ACTION_FLAG_ENGINE_BASE))
+			mmio_base = hwe->mmio_base;
+		else
+			mmio_base = 0;
+
+		rtp_add_sr_entry(action, gt, mmio_base, sr);
+	}
+}
+
 /**
  * xe_rtp_process - Process all rtp @entries, adding the matching ones to @sr
  * @entries: Table with RTP definitions
@@ -121,23 +143,14 @@ void xe_rtp_process(const struct xe_rtp_entry *entries, struct xe_reg_sr *sr,
 	const struct xe_rtp_entry *entry;
 
 	for (entry = entries; entry && entry->name; entry++) {
-		u32 mmio_base = 0;
-
-		if (entry->regval.flags & XE_RTP_FLAG_FOREACH_ENGINE) {
+		if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
 			struct xe_hw_engine *each_hwe;
 			enum xe_hw_engine_id id;
 
-			for_each_hw_engine(each_hwe, gt, id) {
-				mmio_base = each_hwe->mmio_base;
-
-				if (rule_matches(gt, each_hwe, entry))
-					rtp_add_sr_entry(entry, gt, mmio_base, sr);
-			}
-		} else if (rule_matches(gt, hwe, entry)) {
-			if (entry->regval.flags & XE_RTP_FLAG_ENGINE_BASE)
-				mmio_base = hwe->mmio_base;
-
-			rtp_add_sr_entry(entry, gt, mmio_base, sr);
+			for_each_hw_engine(each_hwe, gt, id)
+				rtp_process_one(entry, gt, each_hwe, sr);
+		} else {
+			rtp_process_one(entry, gt, hwe, sr);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 2cab0849b17407adb915013a40c47844597bd965..1ac3fd1c0734d51f0ab3be2973f2a4d3ecf861ff 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -44,12 +44,17 @@ struct xe_reg_sr;
 #define CALL_FOR_EACH(MACRO_, x, ...)						\
 	_CALL_FOR_EACH(COUNT_ARGS(x, ##__VA_ARGS__), MACRO_, x, ##__VA_ARGS__)
 
+#define _XE_RTP_REG(x_)	(x_), XE_RTP_REG_REGULAR
+#define _XE_RTP_MCR_REG(x_) (x_), XE_RTP_REG_MCR
+
 /*
  * Helper macros for concatenating prefix - do not use them directly outside
  * this header
  */
-#define __ADD_XE_RTP_FLAG_PREFIX(x) CONCATENATE(XE_RTP_FLAG_, x) |
+#define __ADD_XE_RTP_ENTRY_FLAG_PREFIX(x) CONCATENATE(XE_RTP_ENTRY_FLAG_, x) |
+#define __ADD_XE_RTP_ACTION_FLAG_PREFIX(x) CONCATENATE(XE_RTP_ACTION_FLAG_, x) |
 #define __ADD_XE_RTP_RULE_PREFIX(x) CONCATENATE(XE_RTP_RULE_, x) ,
+#define __ADD_XE_RTP_ACTION_PREFIX(x) CONCATENATE(XE_RTP_ACTION_, x) ,
 
 /*
  * Macros to encode rules to match against platform, IP version, stepping, etc.
@@ -191,24 +196,28 @@ struct xe_reg_sr;
 	{ .match_type = XE_RTP_MATCH_DISCRETE }
 
 /**
- * XE_RTP_WR - Helper to write a value to the register, overriding all the bits
+ * XE_RTP_ACTION_WR - Helper to write a value to the register, overriding all
+ *                    the bits
  * @reg_: Register
+ * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
  * @val_: Value to set
- * @...: Additional fields to override in the struct xe_rtp_regval entry
+ * @...: Additional fields to override in the struct xe_rtp_action entry
  *
  * The correspondent notation in bspec is:
  *
  *	REGNAME = VALUE
  */
-#define XE_RTP_WR(reg_, val_, ...)						\
-	.regval = { .reg = (reg_), .clr_bits = ~0u, .set_bits = (val_),		\
-		    .read_mask = (~0u), ##__VA_ARGS__ }
+#define XE_RTP_ACTION_WR(reg_, reg_type_, val_, ...)				\
+	{ .reg = (reg_), .reg_type = (reg_type_),				\
+	  .clr_bits = ~0u, .set_bits = (val_),					\
+	  .read_mask = (~0u), ##__VA_ARGS__ }
 
 /**
- * XE_RTP_SET - Set bits from @val_ in the register.
+ * XE_RTP_ACTION_SET - Set bits from @val_ in the register.
  * @reg_: Register
+ * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
  * @val_: Bits to set in the register
- * @...: Additional fields to override in the struct xe_rtp_regval entry
+ * @...: Additional fields to override in the struct xe_rtp_action entry
  *
  * For masked registers this translates to a single write, while for other
  * registers it's a RMW. The correspondent bspec notation is (example for bits 2
@@ -217,15 +226,17 @@ struct xe_reg_sr;
  *	REGNAME[2] = 1
  *	REGNAME[5] = 1
  */
-#define XE_RTP_SET(reg_, val_, ...)						\
-	.regval = { .reg = (reg_), .clr_bits = (val_), .set_bits = (val_),	\
-		    .read_mask = (val_), ##__VA_ARGS__ }
+#define XE_RTP_ACTION_SET(reg_, reg_type_, val_, ...)				\
+	{ .reg = (reg_), .reg_type = (reg_type_),				\
+	  .clr_bits = (val_), .set_bits = (val_),				\
+	  .read_mask = (val_), ##__VA_ARGS__ }
 
 /**
- * XE_RTP_CLR: Clear bits from @val_ in the register.
+ * XE_RTP_ACTION_CLR: Clear bits from @val_ in the register.
  * @reg_: Register
+ * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
  * @val_: Bits to clear in the register
- * @...: Additional fields to override in the struct xe_rtp_regval entry
+ * @...: Additional fields to override in the struct xe_rtp_action entry
  *
  * For masked registers this translates to a single write, while for other
  * registers it's a RMW. The correspondent bspec notation is (example for bits 2
@@ -234,44 +245,49 @@ struct xe_reg_sr;
  *	REGNAME[2] = 0
  *	REGNAME[5] = 0
  */
-#define XE_RTP_CLR(reg_, val_, ...)						\
-	.regval = { .reg = (reg_), .clr_bits = (val_), .set_bits = 0,		\
-		    .read_mask = (val_), ##__VA_ARGS__ }
+#define XE_RTP_ACTION_CLR(reg_, reg_type_, val_, ...)				\
+	{ .reg = (reg_), .reg_type = (reg_type_),				\
+	  .clr_bits = (val_), .set_bits = 0,					\
+	  .read_mask = (val_), ##__VA_ARGS__ }
 
 /**
- * XE_RTP_FIELD_SET: Set a bit range, defined by @mask_bits_, to the value in
+ * XE_RTP_ACTION_FIELD_SET: Set a bit range, defined by @mask_bits_, to @val_
  * @reg_: Register
+ * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
  * @mask_bits_: Mask of bits to be changed in the register, forming a field
  * @val_: Value to set in the field denoted by @mask_bits_
- * @...: Additional fields to override in the struct xe_rtp_regval entry
+ * @...: Additional fields to override in the struct xe_rtp_action entry
  *
  * For masked registers this translates to a single write, while for other
  * registers it's a RMW. The correspondent bspec notation is:
  *
  *	REGNAME[<end>:<start>] = VALUE
  */
-#define XE_RTP_FIELD_SET(reg_, mask_bits_, val_, ...)				\
-	.regval = { .reg = (reg_), .clr_bits = (mask_bits_), .set_bits = (val_),\
-		    .read_mask = (mask_bits_), ##__VA_ARGS__ }
+#define XE_RTP_ACTION_FIELD_SET(reg_, reg_type_, mask_bits_, val_, ...)		\
+	{ .reg = (reg_), .reg_type = (reg_type_),				\
+	  .clr_bits = (mask_bits_), .set_bits = (val_),				\
+	  .read_mask = (mask_bits_), ##__VA_ARGS__ }
 
-#define XE_RTP_FIELD_SET_NO_READ_MASK(reg_, mask_bits_, val_, ...)		\
-	.regval = { .reg = (reg_), .clr_bits = (mask_bits_), .set_bits = (val_),\
-		    .read_mask = 0, ##__VA_ARGS__ }
+#define XE_RTP_ACTION_FIELD_SET_NO_READ_MASK(reg_, reg_type_, mask_bits_, val_, ...)	\
+	{ .reg = (reg_), .reg_type = (reg_type_),				\
+	  .clr_bits = (mask_bits_), .set_bits = (val_),				\
+	  .read_mask = 0, ##__VA_ARGS__ }
 
 /**
- * XE_WHITELIST_REGISTER - Add register to userspace whitelist
+ * XE_RTP_ACTION_WHITELIST - Add register to userspace whitelist
  * @reg_: Register
- * @flags_: Whitelist-specific flags to set
- * @...: Additional fields to override in the struct xe_rtp_regval entry
+ * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
+ * @val_: Whitelist-specific flags to set
+ * @...: Additional fields to override in the struct xe_rtp_action entry
  *
  * Add a register to the whitelist, allowing userspace to modify the ster with
  * regular user privileges.
  */
-#define XE_WHITELIST_REGISTER(reg_, flags_, ...)				\
+#define XE_RTP_ACTION_WHITELIST(reg_, reg_type_, val_, ...)			\
 	/* TODO fail build if ((flags) & ~(RING_FORCE_TO_NONPRIV_MASK_VALID)) */\
-	.regval = { .reg = (reg_), .set_bits = (flags_),			\
-		    .clr_bits = RING_FORCE_TO_NONPRIV_MASK_VALID,		\
-		    ##__VA_ARGS__ }
+	{ .reg = (reg_), .reg_type = (reg_type_), .set_bits = (val_),		\
+	  .clr_bits = RING_FORCE_TO_NONPRIV_MASK_VALID,				\
+	  ##__VA_ARGS__ }
 
 /**
  * XE_RTP_NAME - Helper to set the name in xe_rtp_entry
@@ -282,26 +298,50 @@ struct xe_reg_sr;
 #define XE_RTP_NAME(s_)	.name = (s_)
 
 /**
- * XE_RTP_FLAG - Helper to add multiple flags to a struct xe_rtp_regval entry
- * @f1_: Last part of a ``XE_RTP_FLAG_*``
+ * XE_RTP_ENTRY_FLAG - Helper to add multiple flags to a struct xe_rtp_entry
+ * @f1_: Last part of a ``XE_RTP_ENTRY_FLAG_*``
  * @...: Additional flags, defined like @f1_
  *
- * Helper to automatically add a ``XE_RTP_FLAG_`` prefix to @f1_ so it can be
- * easily used to define struct xe_rtp_regval entries. Example:
+ * Helper to automatically add a ``XE_RTP_ENTRY_FLAG_`` prefix to @f1_ so it can
+ * be easily used to define struct xe_rtp_entry entries. Example:
  *
  * .. code-block:: c
  *
  *	const struct xe_rtp_entry wa_entries[] = {
  *		...
  *		{ XE_RTP_NAME("test-entry"),
- *		  XE_RTP_FLAG(FOREACH_ENGINE, MASKED_REG),
+ *		  ...
+ *		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
  *		  ...
  *		},
  *		...
  *	};
  */
-#define XE_RTP_FLAG(f1_, ...)							\
-	.flags = (CALL_FOR_EACH(__ADD_XE_RTP_FLAG_PREFIX, f1_, ##__VA_ARGS__) 0)
+#define XE_RTP_ENTRY_FLAG(f1_, ...)						\
+	.flags = (CALL_FOR_EACH(__ADD_XE_RTP_ENTRY_FLAG_PREFIX, f1_, ##__VA_ARGS__) 0)
+
+/**
+ * XE_RTP_ACTION_FLAG - Helper to add multiple flags to a struct xe_rtp_action
+ * @f1_: Last part of a ``XE_RTP_ACTION_FLAG_*``
+ * @...: Additional flags, defined like @f1_
+ *
+ * Helper to automatically add a ``XE_RTP_ACTION_FLAG_`` prefix to @f1_ so it
+ * can be easily used to define struct xe_rtp_action entries. Example:
+ *
+ * .. code-block:: c
+ *
+ *	const struct xe_rtp_entry wa_entries[] = {
+ *		...
+ *		{ XE_RTP_NAME("test-entry"),
+ *		  ...
+ *		  XE_RTP_ACTION_SET(..., XE_RTP_ACTION_FLAG(FOREACH_ENGINE)),
+ *		  ...
+ *		},
+ *		...
+ *	};
+ */
+#define XE_RTP_ACTION_FLAG(f1_, ...)						\
+	.flags = (CALL_FOR_EACH(__ADD_XE_RTP_ACTION_FLAG_PREFIX, f1_, ##__VA_ARGS__) 0)
 
 /**
  * XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry entry
@@ -329,6 +369,33 @@ struct xe_reg_sr;
 		CALL_FOR_EACH(__ADD_XE_RTP_RULE_PREFIX, r1, ##__VA_ARGS__)	\
 	}
 
+/**
+ * XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry
+ * @a1: Action to take. Last part of XE_RTP_ACTION_*
+ * @...: Additional actions, defined like @a1
+ *
+ * At least one action is needed and up to 4 are supported. All the actions
+ * are applied, in order, when the entry's rules match. See XE_RTP_ACTION_*
+ * for the possible actions. Example:
+ *
+ * .. code-block:: c
+ *
+ *	const struct xe_rtp_entry wa_entries[] = {
+ *		...
+ *		{ XE_RTP_NAME("test-entry"),
+ *		  XE_RTP_RULES(...),
+ *		  XE_RTP_ACTIONS(SET(..), SET(...), CLR(...)),
+ *		  ...
+ *		},
+ *		...
+ *	};
+ */
+#define XE_RTP_ACTIONS(a1, ...)							\
+	.n_actions = COUNT_ARGS(a1, ##__VA_ARGS__),				\
+	.actions = (struct xe_rtp_action[]) {					\
+		CALL_FOR_EACH(__ADD_XE_RTP_ACTION_PREFIX, a1, ##__VA_ARGS__)	\
+	}
+
 void xe_rtp_process(const struct xe_rtp_entry *entries, struct xe_reg_sr *sr,
 		    struct xe_gt *gt, struct xe_hw_engine *hwe);
 
diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h
index 14e5d4dbb4c15474d16a2c872491e8fbea9b9a0e..fac0bd6d5b1e01f328a788301c36cd42b89da210 100644
--- a/drivers/gpu/drm/xe/xe_rtp_types.h
+++ b/drivers/gpu/drm/xe/xe_rtp_types.h
@@ -13,16 +13,20 @@
 struct xe_hw_engine;
 struct xe_gt;
 
+enum {
+	XE_RTP_REG_REGULAR,
+	XE_RTP_REG_MCR,
+};
+
 /**
- * struct xe_rtp_regval - register and value for rtp table
+ * struct xe_rtp_action - action to take for any matching rule
+ *
+ * This struct records what action should be taken on a register when the
+ * entry's rules match. Examples of actions: set/clear bits.
  */
-struct xe_rtp_regval {
+struct xe_rtp_action {
 	/** @reg: Register */
-	i915_reg_t	reg;
-	/*
-	 * TODO: maybe we need a union here with a func pointer for cases
-	 * that are too specific to be generalized
-	 */
+	u32		reg;
 	/** @clr_bits: bits to clear when updating register */
 	u32		clr_bits;
 	/** @set_bits: bits to set when updating register */
@@ -30,11 +34,12 @@ struct xe_rtp_regval {
 #define XE_RTP_NOCHECK		.read_mask = 0
 	/** @read_mask: mask for bits to consider when reading value back */
 	u32		read_mask;
-#define XE_RTP_FLAG_FOREACH_ENGINE	BIT(0)
-#define XE_RTP_FLAG_MASKED_REG		BIT(1)
-#define XE_RTP_FLAG_ENGINE_BASE		BIT(2)
+#define XE_RTP_ACTION_FLAG_MASKED_REG		BIT(0)
+#define XE_RTP_ACTION_FLAG_ENGINE_BASE		BIT(1)
 	/** @flags: flags to apply on rule evaluation or action */
 	u8		flags;
+	/** @reg_type: register type, see ``XE_RTP_REG_*`` */
+	u8		reg_type;
 };
 
 enum {
@@ -90,9 +95,12 @@ struct xe_rtp_rule {
 /** struct xe_rtp_entry - Entry in an rtp table */
 struct xe_rtp_entry {
 	const char *name;
-	const struct xe_rtp_regval regval;
+	const struct xe_rtp_action *actions;
 	const struct xe_rtp_rule *rules;
-	unsigned int n_rules;
+	u8 n_rules;
+	u8 n_actions;
+#define XE_RTP_ENTRY_FLAG_FOREACH_ENGINE	BIT(0)
+	u8 flags;
 };
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 3127f59481f714ed323fb605bc46d5f374a60331..3cc32e3e7a90fd9673c28ac601830819dfe1dace 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -3,7 +3,7 @@
  * Copyright © 2022 Intel Corporation
  */
 
-#include "xe_wa.h"
+#include "xe_tuning.h"
 
 #include "xe_platform_types.h"
 #include "xe_gt_types.h"
@@ -11,10 +11,15 @@
 
 #include "gt/intel_gt_regs.h"
 
+#undef _MMIO
+#undef MCR_REG
+#define _MMIO(x)	_XE_RTP_REG(x)
+#define MCR_REG(x)	_XE_RTP_MCR_REG(x)
+
 static const struct xe_rtp_entry gt_tunings[] = {
 	{ XE_RTP_NAME("Tuning: 32B Access Enable"),
 	  XE_RTP_RULES(PLATFORM(DG2)),
-	  XE_RTP_SET(XEHP_SQCM, EN_32B_ACCESS)
+	  XE_RTP_ACTIONS(SET(XEHP_SQCM, EN_32B_ACCESS))
 	},
 	{}
 };
@@ -22,8 +27,9 @@ static const struct xe_rtp_entry gt_tunings[] = {
 static const struct xe_rtp_entry context_tunings[] = {
 	{ XE_RTP_NAME("1604555607"),
 	  XE_RTP_RULES(GRAPHICS_VERSION(1200)),
-	  XE_RTP_FIELD_SET_NO_READ_MASK(XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
-					FF_MODE2_TDS_TIMER_128)
+	  XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(XEHP_FF_MODE2,
+						FF_MODE2_TDS_TIMER_MASK,
+						FF_MODE2_TDS_TIMER_128))
 	},
 	{}
 };
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 94097849cbd1e50a9f261409fc04e4d0445d71d0..9d2e4555091c72575b7fbddbc63515ee3cfc1512 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -35,9 +35,6 @@
  *   creation to have a "primed golden context", i.e. a context image that
  *   already contains the changes needed to all the registers.
  *
- *   TODO: Although these workarounds are maintained here, they are not
- *   currently being applied.
- *
  * - Engine workarounds: the list of these WAs is applied whenever the specific
  *   engine is reset. It's also possible that a set of engine classes share a
  *   common power domain and they are reset together. This happens on some
@@ -90,103 +87,99 @@
  *    a more declarative approach rather than procedural.
  */
 
+#undef _MMIO
+#undef MCR_REG
+#define _MMIO(x)	_XE_RTP_REG(x)
+#define MCR_REG(x)	_XE_RTP_MCR_REG(x)
+
 static bool match_14011060649(const struct xe_gt *gt,
 			      const struct xe_hw_engine *hwe)
 {
 	return hwe->instance % 2 == 0;
 }
 
-static bool match_not_render(const struct xe_gt *gt,
-			     const struct xe_hw_engine *hwe)
-{
-	return hwe->class != XE_ENGINE_CLASS_RENDER;
-}
-
 static const struct xe_rtp_entry gt_was[] = {
 	{ XE_RTP_NAME("14011060649"),
 	  XE_RTP_RULES(MEDIA_VERSION_RANGE(1200, 1255),
 		       ENGINE_CLASS(VIDEO_DECODE),
 		       FUNC(match_14011060649)),
-	  XE_RTP_SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS,
-		     XE_RTP_FLAG(FOREACH_ENGINE))
+	  XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+	  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
 	},
 	{ XE_RTP_NAME("16010515920"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10),
 		       STEP(A0, B0),
 		       ENGINE_CLASS(VIDEO_DECODE)),
-	  XE_RTP_SET(VDBOX_CGCTL3F18(0), ALNUNIT_CLKGATE_DIS,
-		     XE_RTP_FLAG(FOREACH_ENGINE))
+	  XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F18(0), ALNUNIT_CLKGATE_DIS)),
+	  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
 	},
 	{ XE_RTP_NAME("22010523718"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10)),
-	  XE_RTP_SET(UNSLICE_UNIT_LEVEL_CLKGATE, CG3DDISCFEG_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, CG3DDISCFEG_CLKGATE_DIS))
 	},
 	{ XE_RTP_NAME("14011006942"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10)),
-	  XE_RTP_SET(GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE, DSS_ROUTER_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE, DSS_ROUTER_CLKGATE_DIS))
 	},
 	{ XE_RTP_NAME("14010948348"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10), STEP(A0, B0)),
-	  XE_RTP_SET(UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS))
 	},
 	{ XE_RTP_NAME("14011037102"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10), STEP(A0, B0)),
-	  XE_RTP_SET(UNSLCGCTL9444, LTCDD_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(UNSLCGCTL9444, LTCDD_CLKGATE_DIS))
 	},
 	{ XE_RTP_NAME("14011371254"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10), STEP(A0, B0)),
-	  XE_RTP_SET(GEN11_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(GEN11_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS))
 	},
-	{ XE_RTP_NAME("14011431319/0"),
+	{ XE_RTP_NAME("14011431319"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10), STEP(A0, B0)),
-	  XE_RTP_SET(UNSLCGCTL9440,
-		     GAMTLBOACS_CLKGATE_DIS |
-		     GAMTLBVDBOX7_CLKGATE_DIS | GAMTLBVDBOX6_CLKGATE_DIS |
-		     GAMTLBVDBOX5_CLKGATE_DIS | GAMTLBVDBOX4_CLKGATE_DIS |
-		     GAMTLBVDBOX3_CLKGATE_DIS | GAMTLBVDBOX2_CLKGATE_DIS |
-		     GAMTLBVDBOX1_CLKGATE_DIS | GAMTLBVDBOX0_CLKGATE_DIS |
-		     GAMTLBKCR_CLKGATE_DIS | GAMTLBGUC_CLKGATE_DIS |
-		     GAMTLBBLT_CLKGATE_DIS)
-	},
-	{ XE_RTP_NAME("14011431319/1"),
-	  XE_RTP_RULES(SUBPLATFORM(DG2, G10), STEP(A0, B0)),
-	  XE_RTP_SET(UNSLCGCTL9444,
-		     GAMTLBGFXA0_CLKGATE_DIS | GAMTLBGFXA1_CLKGATE_DIS |
-		     GAMTLBCOMPA0_CLKGATE_DIS | GAMTLBCOMPA1_CLKGATE_DIS |
-		     GAMTLBCOMPB0_CLKGATE_DIS | GAMTLBCOMPB1_CLKGATE_DIS |
-		     GAMTLBCOMPC0_CLKGATE_DIS | GAMTLBCOMPC1_CLKGATE_DIS |
-		     GAMTLBCOMPD0_CLKGATE_DIS | GAMTLBCOMPD1_CLKGATE_DIS |
-		     GAMTLBMERT_CLKGATE_DIS |
-		     GAMTLBVEBOX3_CLKGATE_DIS | GAMTLBVEBOX2_CLKGATE_DIS |
-		     GAMTLBVEBOX1_CLKGATE_DIS | GAMTLBVEBOX0_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(UNSLCGCTL9440,
+			     GAMTLBOACS_CLKGATE_DIS |
+			     GAMTLBVDBOX7_CLKGATE_DIS | GAMTLBVDBOX6_CLKGATE_DIS |
+			     GAMTLBVDBOX5_CLKGATE_DIS | GAMTLBVDBOX4_CLKGATE_DIS |
+			     GAMTLBVDBOX3_CLKGATE_DIS | GAMTLBVDBOX2_CLKGATE_DIS |
+			     GAMTLBVDBOX1_CLKGATE_DIS | GAMTLBVDBOX0_CLKGATE_DIS |
+			     GAMTLBKCR_CLKGATE_DIS | GAMTLBGUC_CLKGATE_DIS |
+			     GAMTLBBLT_CLKGATE_DIS),
+			 SET(UNSLCGCTL9444,
+			     GAMTLBGFXA0_CLKGATE_DIS | GAMTLBGFXA1_CLKGATE_DIS |
+			     GAMTLBCOMPA0_CLKGATE_DIS | GAMTLBCOMPA1_CLKGATE_DIS |
+			     GAMTLBCOMPB0_CLKGATE_DIS | GAMTLBCOMPB1_CLKGATE_DIS |
+			     GAMTLBCOMPC0_CLKGATE_DIS | GAMTLBCOMPC1_CLKGATE_DIS |
+			     GAMTLBCOMPD0_CLKGATE_DIS | GAMTLBCOMPD1_CLKGATE_DIS |
+			     GAMTLBMERT_CLKGATE_DIS |
+			     GAMTLBVEBOX3_CLKGATE_DIS | GAMTLBVEBOX2_CLKGATE_DIS |
+			     GAMTLBVEBOX1_CLKGATE_DIS | GAMTLBVEBOX0_CLKGATE_DIS))
 	},
 	{ XE_RTP_NAME("14010569222"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10), STEP(A0, B0)),
-	  XE_RTP_SET(UNSLICE_UNIT_LEVEL_CLKGATE, GAMEDIA_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, GAMEDIA_CLKGATE_DIS))
 	},
 	{ XE_RTP_NAME("14011028019"),
 	  XE_RTP_RULES(SUBPLATFORM(DG2, G10), STEP(A0, B0)),
-	  XE_RTP_SET(SSMCGCTL9530, RTFUNIT_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(SSMCGCTL9530, RTFUNIT_CLKGATE_DIS))
 	},
 	{ XE_RTP_NAME("14014830051"),
 	  XE_RTP_RULES(PLATFORM(DG2)),
-	  XE_RTP_CLR(SARB_CHICKEN1, COMP_CKN_IN)
+	  XE_RTP_ACTIONS(CLR(SARB_CHICKEN1, COMP_CKN_IN))
 	},
 	{ XE_RTP_NAME("14015795083"),
 	  XE_RTP_RULES(PLATFORM(DG2)),
-	  XE_RTP_CLR(GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE)
+	  XE_RTP_ACTIONS(CLR(GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE))
 	},
 	{ XE_RTP_NAME("14011059788"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
-	  XE_RTP_SET(GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE)
+	  XE_RTP_ACTIONS(SET(GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE))
 	},
 	{ XE_RTP_NAME("1409420604"),
 	  XE_RTP_RULES(PLATFORM(DG1)),
-	  XE_RTP_SET(SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS)
+	  XE_RTP_ACTIONS(SET(SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS))
 	},
 	{ XE_RTP_NAME("1408615072"),
 	  XE_RTP_RULES(PLATFORM(DG1)),
-	  XE_RTP_SET(UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL)
+	  XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL))
 	},
 	{}
 };
@@ -194,62 +187,67 @@ static const struct xe_rtp_entry gt_was[] = {
 static const struct xe_rtp_entry engine_was[] = {
 	{ XE_RTP_NAME("14015227452"),
 	  XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("1606931601"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("22010931296, 18011464164, 14010919138"),
 	  XE_RTP_RULES(GRAPHICS_VERSION(1200), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(GEN7_FF_THREAD_MODE, GEN12_FF_TESSELATION_DOP_GATE_DISABLE)
+	  XE_RTP_ACTIONS(SET(GEN7_FF_THREAD_MODE,
+			     GEN12_FF_TESSELATION_DOP_GATE_DISABLE))
 	},
 	{ XE_RTP_NAME("14010826681, 1606700617, 22010271021"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("18019627453"),
 	  XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("1409804808"),
 	  XE_RTP_RULES(GRAPHICS_VERSION(1200),
 		       ENGINE_CLASS(RENDER),
 		       IS_INTEGRATED),
-	  XE_RTP_SET(GEN7_ROW_CHICKEN2, GEN12_PUSH_CONST_DEREF_HOLD_DIS,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN7_ROW_CHICKEN2, GEN12_PUSH_CONST_DEREF_HOLD_DIS,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("14010229206, 1409085225"),
 	  XE_RTP_RULES(GRAPHICS_VERSION(1200),
 		       ENGINE_CLASS(RENDER),
 		       IS_INTEGRATED),
-	  XE_RTP_SET(GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("1607297627, 1607030317, 1607186500"),
 	  XE_RTP_RULES(PLATFORM(TIGERLAKE), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(RING_PSMI_CTL(RENDER_RING_BASE),
-		     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
-		     GEN8_RC_SEMA_IDLE_MSG_DISABLE, XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(RING_PSMI_CTL(RENDER_RING_BASE),
+			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+			     GEN8_RC_SEMA_IDLE_MSG_DISABLE,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("1607297627, 1607030317, 1607186500"),
 	  XE_RTP_RULES(PLATFORM(ROCKETLAKE), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(RING_PSMI_CTL(RENDER_RING_BASE),
-		     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
-		     GEN8_RC_SEMA_IDLE_MSG_DISABLE, XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(RING_PSMI_CTL(RENDER_RING_BASE),
+			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+			     GEN8_RC_SEMA_IDLE_MSG_DISABLE,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("1406941453"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(GEN10_SAMPLER_MODE, ENABLE_SMALLPL, XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN10_SAMPLER_MODE, ENABLE_SMALLPL,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("FtrPerCtxtPreemptionGranularityControl"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1250), ENGINE_CLASS(RENDER)),
-	  XE_RTP_SET(GEN7_FF_SLICE_CS_CHICKEN1, GEN9_FFSC_PERCTX_PREEMPT_CTRL,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN7_FF_SLICE_CS_CHICKEN1,
+			     GEN9_FFSC_PERCTX_PREEMPT_CTRL,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{}
 };
@@ -257,69 +255,35 @@ static const struct xe_rtp_entry engine_was[] = {
 static const struct xe_rtp_entry lrc_was[] = {
 	{ XE_RTP_NAME("1409342910, 14010698770, 14010443199, 1408979724, 1409178076, 1409207793, 1409217633, 1409252684, 1409347922, 1409142259"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
-	  XE_RTP_SET(GEN11_COMMON_SLICE_CHICKEN3,
-		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(SET(GEN11_COMMON_SLICE_CHICKEN3,
+			     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("WaDisableGPGPUMidThreadPreemption"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
-	  XE_RTP_FIELD_SET(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
-			   GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL,
-			   XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(FIELD_SET(GEN8_CS_CHICKEN1,
+				   GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+				   GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL,
+				   XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("16011163337"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
 	  /* read verification is ignored due to 1608008084. */
-	  XE_RTP_FIELD_SET_NO_READ_MASK(GEN12_FF_MODE2, FF_MODE2_GS_TIMER_MASK,
-					FF_MODE2_GS_TIMER_224)
+	  XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(GEN12_FF_MODE2,
+						FF_MODE2_GS_TIMER_MASK,
+						FF_MODE2_GS_TIMER_224))
 	},
 	{ XE_RTP_NAME("1409044764"),
 	  XE_RTP_RULES(PLATFORM(DG1)),
-	  XE_RTP_CLR(GEN11_COMMON_SLICE_CHICKEN3,
-		     DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN,
-		     XE_RTP_FLAG(MASKED_REG))
+	  XE_RTP_ACTIONS(CLR(GEN11_COMMON_SLICE_CHICKEN3,
+			     DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{ XE_RTP_NAME("22010493298"),
 	  XE_RTP_RULES(PLATFORM(DG1)),
-	  XE_RTP_SET(HIZ_CHICKEN,
-		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE,
-		     XE_RTP_FLAG(MASKED_REG))
-	},
-	{}
-};
-
-static const struct xe_rtp_entry register_whitelist[] = {
-	{ XE_RTP_NAME("WaAllowPMDepthAndInvocationCountAccessFromUMD, 1408556865"),
-	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
-	  XE_WHITELIST_REGISTER(PS_INVOCATION_COUNT,
-				RING_FORCE_TO_NONPRIV_ACCESS_RD |
-				RING_FORCE_TO_NONPRIV_RANGE_4)
-	},
-	{ XE_RTP_NAME("1508744258, 14012131227, 1808121037"),
-	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
-	  XE_WHITELIST_REGISTER(GEN7_COMMON_SLICE_CHICKEN1, 0)
-	},
-	{ XE_RTP_NAME("1806527549"),
-	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
-	  XE_WHITELIST_REGISTER(HIZ_CHICKEN, 0)
-	},
-	{ XE_RTP_NAME("allow_read_ctx_timestamp"),
-	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1260), FUNC(match_not_render)),
-	  XE_WHITELIST_REGISTER(RING_CTX_TIMESTAMP(0),
-				RING_FORCE_TO_NONPRIV_ACCESS_RD,
-				XE_RTP_FLAG(ENGINE_BASE))
-	},
-	{ XE_RTP_NAME("16014440446_part_1"),
-	  XE_RTP_RULES(PLATFORM(PVC)),
-	  XE_WHITELIST_REGISTER(_MMIO(0x4400),
-				RING_FORCE_TO_NONPRIV_DENY |
-				RING_FORCE_TO_NONPRIV_RANGE_64)
-	},
-	{ XE_RTP_NAME("16014440446_part_2"),
-	  XE_RTP_RULES(PLATFORM(PVC)),
-	  XE_WHITELIST_REGISTER(_MMIO(0x4500),
-				RING_FORCE_TO_NONPRIV_DENY |
-				RING_FORCE_TO_NONPRIV_RANGE_64)
+	  XE_RTP_ACTIONS(SET(HIZ_CHICKEN,
+			     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE,
+			     XE_RTP_ACTION_FLAG(MASKED_REG)))
 	},
 	{}
 };
@@ -361,53 +325,3 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
 {
 	xe_rtp_process(lrc_was, &hwe->reg_lrc, hwe->gt, hwe);
 }
-
-/**
- * xe_reg_whitelist_process_engine - process table of registers to whitelist
- * @hwe: engine instance to process whitelist for
- *
- * Process wwhitelist table for this platform, saving in @hwe all the
- * registers that need to be whitelisted by the hardware so they can be accessed
- * by userspace.
- */
-void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
-{
-	xe_rtp_process(register_whitelist, &hwe->reg_whitelist, hwe->gt, hwe);
-}
-
-void xe_reg_whitelist_apply(struct xe_hw_engine *hwe)
-{
-	struct xe_gt *gt = hwe->gt;
-	struct xe_device *xe = gt_to_xe(gt);
-	struct xe_reg_sr *sr = &hwe->reg_whitelist;
-	struct xe_reg_sr_entry *entry;
-	const u32 base = hwe->mmio_base;
-	unsigned long reg;
-	unsigned int slot = 0;
-	int err;
-
-	drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);
-
-	err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
-	if (err)
-		goto err_force_wake;
-
-	xa_for_each(&sr->xa, reg, entry) {
-		xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(base, slot).reg,
-				reg | entry->set_bits);
-		slot++;
-	}
-
-	/* And clear the rest just in case of garbage */
-	for (; slot < RING_MAX_NONPRIV_SLOTS; slot++)
-		xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(base, slot).reg,
-				RING_NOPID(base).reg);
-
-	err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
-	XE_WARN_ON(err);
-
-	return;
-
-err_force_wake:
-	drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
-}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index 1a0659690a32077aa17905a18bd0bddf1dcd0f09..cd2307d587950075ec51ce6499207c91fa78775c 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -14,6 +14,5 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe);
 void xe_wa_process_lrc(struct xe_hw_engine *hwe);
 
 void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe);
-void xe_reg_whitelist_apply(struct xe_hw_engine *hwe);
 
 #endif