drm/i915/d12+: Disable DMC firmware flip queue handlers
authorImre Deak <imre.deak@intel.com>
Sat, 21 May 2022 13:08:08 +0000 (16:08 +0300)
committerImre Deak <imre.deak@intel.com>
Mon, 23 May 2022 13:49:24 +0000 (16:49 +0300)
Based on a bspec update the DMC firmware's flip queue handling events
need to be disabled before enabling DC5/6. i915 doesn't use the flip
queue feature at the moment, so disable it right after loading the firmware.
This removes some overhead of the event handler, which runs at a 1 kHz
frequency.

Bspec: 49193, 72486, 72487

v2:
- Fix the DMC pipe A register offsets for GEN12.
- Disable the events on DG2 only on pipes A-D.
v3: (Lucas)
- Add TODO: to clarify the disabling sequence on all D13+
- s/intel_dmc_has_fw_payload/has_dmc_id_fw/
- s/simple_flipq/flipq/
- s/_GEN12,_GEN13/TGL_,ADLP_/
- s/MAINDMC/DMC/
v4:
- Only disable flip queues on TGL/DG2, as on other platforms the
  corresponding event handlers don't exist.

Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Anusha Srivatsa <anusha.srivatsa@intel.com> # v1
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220521130808.637449-1-imre.deak@intel.com
drivers/gpu/drm/i915/display/intel_dmc.c
drivers/gpu/drm/i915/display/intel_dmc_regs.h

index 34d00f5aff25718b9d23561290fc8cb89d5d40f6..fa9ef591b8853420179d55b307a16c47711b475b 100644 (file)
@@ -248,9 +248,14 @@ struct stepping_info {
        char substepping;
 };
 
+static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
+{
+       return i915->dmc.dmc_info[dmc_id].payload;
+}
+
 bool intel_dmc_has_payload(struct drm_i915_private *i915)
 {
-       return i915->dmc.dmc_info[DMC_FW_MAIN].payload;
+       return has_dmc_id_fw(i915, DMC_FW_MAIN);
 }
 
 static const struct stepping_info *
@@ -272,6 +277,85 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
        intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
 }
 
+static void
+disable_flip_queue_event(struct drm_i915_private *i915,
+                        i915_reg_t ctl_reg, i915_reg_t htp_reg)
+{
+       u32 event_ctl;
+       u32 event_htp;
+
+       event_ctl = intel_de_read(i915, ctl_reg);
+       event_htp = intel_de_read(i915, htp_reg);
+       if (event_ctl != (DMC_EVT_CTL_ENABLE |
+                         DMC_EVT_CTL_RECURRING |
+                         REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+                                        DMC_EVT_CTL_TYPE_EDGE_0_1) |
+                         REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+                                        DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
+           !event_htp) {
+               drm_dbg_kms(&i915->drm,
+                           "Unexpected DMC event configuration (control %08x htp %08x)\n",
+                           event_ctl, event_htp);
+               return;
+       }
+
+       intel_de_write(i915, ctl_reg,
+                      REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+                                     DMC_EVT_CTL_TYPE_EDGE_0_1) |
+                      REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+                                     DMC_EVT_CTL_EVENT_ID_FALSE));
+       intel_de_write(i915, htp_reg, 0);
+}
+
+static bool
+get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
+                         i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
+{
+       switch (dmc_id) {
+       case DMC_FW_MAIN:
+               if (DISPLAY_VER(i915) == 12) {
+                       *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
+                       *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
+
+                       return true;
+               }
+               break;
+       case DMC_FW_PIPEA ... DMC_FW_PIPED:
+               if (IS_DG2(i915)) {
+                       *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
+                       *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
+
+                       return true;
+               }
+               break;
+       }
+
+       return false;
+}
+
+static void
+disable_all_flip_queue_events(struct drm_i915_private *i915)
+{
+       int dmc_id;
+
+       /* TODO: check if the following applies to all D13+ platforms. */
+       if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
+               return;
+
+       for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
+               i915_reg_t ctl_reg;
+               i915_reg_t htp_reg;
+
+               if (!has_dmc_id_fw(i915, dmc_id))
+                       continue;
+
+               if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
+                       continue;
+
+               disable_flip_queue_event(i915, ctl_reg, htp_reg);
+       }
+}
+
 /**
  * intel_dmc_load_program() - write the firmware from memory to register.
  * @dev_priv: i915 drm device.
@@ -312,6 +396,13 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
        dev_priv->dmc.dc_state = 0;
 
        gen9_set_dc_state_debugmask(dev_priv);
+
+       /*
+        * Flip queue events need to be disabled before enabling DC5/6.
+        * i915 doesn't use the flip queue feature, so disable it already
+        * here.
+        */
+       disable_all_flip_queue_events(dev_priv);
 }
 
 void assert_dmc_loaded(struct drm_i915_private *i915)
index 67e14eb96a7aa748f53831c51405046c063b3940..238620b559662d119d7b43e715e00c2f2dc7779c 100644 (file)
 
 #define DMC_PROGRAM(addr, i)   _MMIO((addr) + (i) * 4)
 #define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+
+#define _ADLP_PIPEDMC_REG_MMIO_BASE_A  0x5f000
+#define _TGL_PIPEDMC_REG_MMIO_BASE_A   0x92000
+
+#define __PIPEDMC_REG_MMIO_BASE(i915, dmc_id) \
+       ((DISPLAY_VER(i915) >= 13 ? _ADLP_PIPEDMC_REG_MMIO_BASE_A : \
+                                   _TGL_PIPEDMC_REG_MMIO_BASE_A) + \
+        0x400 * ((dmc_id) - 1))
+
+#define __DMC_REG_MMIO_BASE            0x8f000
+
+#define _DMC_REG_MMIO_BASE(i915, dmc_id) \
+       ((dmc_id) == DMC_FW_MAIN ? __DMC_REG_MMIO_BASE : \
+                                  __PIPEDMC_REG_MMIO_BASE(i915, dmc_id))
+
+#define _DMC_REG(i915, dmc_id, reg) \
+       ((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id))
+
+#define _DMC_EVT_HTP_0                 0x8f004
+
+#define DMC_EVT_HTP(i915, dmc_id, handler) \
+       _MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_HTP_0) + 4 * (handler))
+
+#define _DMC_EVT_CTL_0                 0x8f034
+
+#define DMC_EVT_CTL(i915, dmc_id, handler) \
+       _MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_CTL_0) + 4 * (handler))
+
+#define DMC_EVT_CTL_ENABLE             REG_BIT(31)
+#define DMC_EVT_CTL_RECURRING          REG_BIT(30)
+#define DMC_EVT_CTL_TYPE_MASK          REG_GENMASK(17, 16)
+#define DMC_EVT_CTL_TYPE_LEVEL_0       0
+#define DMC_EVT_CTL_TYPE_LEVEL_1       1
+#define DMC_EVT_CTL_TYPE_EDGE_1_0      2
+#define DMC_EVT_CTL_TYPE_EDGE_0_1      3
+
+#define DMC_EVT_CTL_EVENT_ID_MASK      REG_GENMASK(15, 8)
+#define DMC_EVT_CTL_EVENT_ID_FALSE     0x01
+/* An event handler scheduled to run at a 1 kHz frequency. */
+#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC  0xbf
+
 #define DMC_HTP_ADDR_SKL       0x00500034
 #define DMC_SSP_BASE           _MMIO(0x8F074)
 #define DMC_HTP_SKL            _MMIO(0x8F004)