"MSLICE",
        "LNCF",
        "GAM",
+       "DSS",
        "INSTANCE 0",
 };
 
        {},
 };
 
+static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
+       { 0x000B00, 0x000BFF },         /* SQIDI */
+       { 0x001000, 0x001FFF },         /* SQIDI */
+       { 0x004000, 0x0048FF },         /* GAM */
+       { 0x008700, 0x0087FF },         /* SQIDI */
+       { 0x00B000, 0x00B0FF },         /* NODE */
+       { 0x00C800, 0x00CFFF },         /* GAM */
+       { 0x00D880, 0x00D8FF },         /* NODE */
+       { 0x00DD00, 0x00DDFF },         /* OAAL2 */
+       {},
+};
+
+static const struct intel_mmio_range xelpg_l3bank_steering_table[] = {
+       { 0x00B100, 0x00B3FF },
+       {},
+};
+
+/* DSS steering is used for SLICE ranges as well */
+static const struct intel_mmio_range xelpg_dss_steering_table[] = {
+       { 0x005200, 0x0052FF },         /* SLICE */
+       { 0x005500, 0x007FFF },         /* SLICE */
+       { 0x008140, 0x00815F },         /* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
+       { 0x0094D0, 0x00955F },         /* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
+       { 0x009680, 0x0096FF },         /* DSS */
+       { 0x00D800, 0x00D87F },         /* SLICE */
+       { 0x00DC00, 0x00DCFF },         /* SLICE */
+       { 0x00DE80, 0x00E8FF },         /* DSS (0xE000-0xE0FF reserved) */
+       {},
+};
+
 void intel_gt_mcr_init(struct intel_gt *gt)
 {
        struct drm_i915_private *i915 = gt->i915;
+       unsigned long fuse;
+       int i;
 
        /*
         * An mslice is unavailable only if both the meml3 for the slice is
                        drm_warn(&i915->drm, "mslice mask all zero!\n");
        }
 
-       if (IS_PONTEVECCHIO(i915)) {
+       if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70) &&
+           gt->type == GT_PRIMARY) {
+               fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
+                                    intel_uncore_read(gt->uncore, XEHP_FUSE4));
+
+               /*
+                * Despite the register field being named "exclude mask" the
+                * bits actually represent enabled banks (two banks per bit).
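+                * E.g. fuse = 0b101 enables banks 0-1 and 4-5, giving an
+                * l3bank_mask of 0x33.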
+                */
+               for_each_set_bit(i, &fuse, 3)
+                       gt->info.l3bank_mask |= 0x3 << 2 * i;
+
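+               /*
+                * There is no implicit/default steering here, so every
+                * multicast register range must be covered by one of these
+                * explicit steering tables.
+                */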
+               gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
+               gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
+               gt->steering_table[DSS] = xelpg_dss_steering_table;
+       } else if (IS_PONTEVECCHIO(i915)) {
                gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
        } else if (IS_DG2(i915)) {
                gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
 
        lockdep_assert_held(&uncore->lock);
 
-       if (GRAPHICS_VER(uncore->i915) >= 11) {
+       if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70)) {
+               /*
+                * Always leave the hardware in multicast mode when doing reads
+                * (see comment about Wa_22013088509 below) and only change it
+                * to unicast mode when doing writes of a specific instance.
+                *
+                * No need to save old steering reg value.
+                */
+               intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR,
+                                     REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
+                                     REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance) |
+                                     (rw_flag == FW_REG_READ ? GEN11_MCR_MULTICAST : 0));
+       } else if (GRAPHICS_VER(uncore->i915) >= 11) {
                mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
                mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);
 
                 */
                if (rw_flag == FW_REG_WRITE)
                        mcr_mask |= GEN11_MCR_MULTICAST;
+
+               mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
+               old_mcr = mcr;
+
+               mcr &= ~mcr_mask;
+               mcr |= mcr_ss;
+               intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
        } else {
                mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
                mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);
-       }
 
-       old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
+               mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
+               old_mcr = mcr;
 
-       mcr &= ~mcr_mask;
-       mcr |= mcr_ss;
-       intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+               mcr &= ~mcr_mask;
+               mcr |= mcr_ss;
+               intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+       }
 
        if (rw_flag == FW_REG_READ)
                val = intel_uncore_read_fw(uncore, mcr_reg_cast(reg));
        else
                intel_uncore_write_fw(uncore, mcr_reg_cast(reg), value);
 
-       mcr &= ~mcr_mask;
-       mcr |= old_mcr & mcr_mask;
-
-       intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+       /*
+        * For pre-MTL platforms, we need to restore the old value of the
+        * steering control register to ensure that implicit steering continues
+        * to behave as expected.  For MTL and beyond, we need only reinstate
+        * the 'multicast' bit (and only if we did a write that cleared it).
+        */
+       if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70) && rw_flag == FW_REG_WRITE)
+               intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
+       else if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 70))
+               intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, old_mcr);
 
        return val;
 }
 void intel_gt_mcr_multicast_write(struct intel_gt *gt,
                                  i915_mcr_reg_t reg, u32 value)
 {
+       /*
+        * Ensure we have multicast behavior, just in case some non-i915 agent
+        * left the hardware in unicast mode.
+        */
+       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+               intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
+
        intel_uncore_write(gt->uncore, mcr_reg_cast(reg), value);
 }
 
  */
 void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value)
 {
+       /*
+        * Ensure we have multicast behavior, just in case some non-i915 agent
+        * left the hardware in unicast mode.
+        */
+       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+               intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
+
        intel_uncore_write_fw(gt->uncore, mcr_reg_cast(reg), value);
 }
 
                                       enum intel_steering_type type,
                                       u8 *group, u8 *instance)
 {
+       u32 dss;
+
        switch (type) {
        case L3BANK:
                *group = 0;             /* unused */
                *group = IS_DG2(gt->i915) ? 1 : 0;
                *instance = 0;
                break;
+       case DSS:
+               dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
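+               /*
+                * Split the flat DSS index into a gslice group and an
+                * instance within that gslice; e.g. with 4 DSS per gslice,
+                * DSS 5 steers to group 1, instance 1.
+                */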
+               *group = dss / GEN_DSS_PER_GSLICE;
+               *instance = dss % GEN_DSS_PER_GSLICE;
+               break;
        case INSTANCE0:
                /*
                 * There are a lot of MCR types for which instance (0, 0)
 void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
                                  bool dump_table)
 {
-       drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
-                  gt->default_steering.groupid,
-                  gt->default_steering.instanceid);
-
-       if (IS_PONTEVECCHIO(gt->i915)) {
+       /*
+        * Starting with MTL we no longer have default steering;
+        * all ranges are explicitly steered.
+        */
+       if (GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70))
+               drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
+                          gt->default_steering.groupid,
+                          gt->default_steering.instanceid);
+
+       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
+               for (int i = 0; i < NUM_STEERING_TYPES; i++)
+                       if (gt->steering_table[i])
+                               report_steering_type(p, gt, i, dump_table);
+       } else if (IS_PONTEVECCHIO(gt->i915)) {
                report_steering_type(p, gt, INSTANCE0, dump_table);
        } else if (HAS_MSLICE_STEERING(gt->i915)) {
                report_steering_type(p, gt, MSLICE, dump_table);
 
        wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
 }
 
-static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
-                        unsigned int slice, unsigned int subslice)
+static void debug_dump_steering(struct intel_gt *gt)
 {
        struct drm_printer p = drm_debug_printer("MCR Steering:");
 
+       if (drm_debug_enabled(DRM_UT_DRIVER))
+               intel_gt_mcr_report_steering(&p, gt, false);
+}
+
+static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
+                        unsigned int slice, unsigned int subslice)
+{
        __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
 
        gt->default_steering.groupid = slice;
        gt->default_steering.instanceid = subslice;
 
-       if (drm_debug_enabled(DRM_UT_DRIVER))
-               intel_gt_mcr_report_steering(&p, gt, false);
+       debug_dump_steering(gt);
 }
 
 static void
        wa_mcr_write_clr(wal, GEN8_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
 }
 
+static void
+xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+       /* FIXME: Actual workarounds will be added in future patch(es) */
+
+       /*
+        * Unlike older platforms, we no longer set up implicit steering here;
+        * all MCR accesses are explicitly steered.
+        */
+       debug_dump_steering(gt);
+}
+
 static void
 gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = gt->i915;
 
-       if (IS_PONTEVECCHIO(i915))
+       /* FIXME: Media GT handling will be added in an upcoming patch */
+       if (gt->type == GT_MEDIA)
+               return;
+
+       if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
+               xelpg_gt_workarounds_init(gt, wal);
+       else if (IS_PONTEVECCHIO(i915))
                pvc_gt_workarounds_init(gt, wal);
        else if (IS_DG2(i915))
                dg2_gt_workarounds_init(gt, wal);