drm/i915/mtl: Use primary GT's irq lock for media GT
Author:     Matt Roper <matthew.d.roper@intel.com>
AuthorDate: Tue, 6 Sep 2022 23:49:33 +0000 (16:49 -0700)
Commit:     Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
CommitDate: Mon, 12 Sep 2022 12:23:12 +0000 (15:23 +0300)
When we hook up interrupts (in the next patch), the media GT's interrupts
will still be processed as part of the primary GT's interrupt flow, so the
media GT should share the primary GT's IRQ lock.  Let's convert
gt->irq_lock into a pointer and simply point the media GT's instance at
the same lock the primary GT is using.
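
In short, a condensed sketch of what the diff below does (error handling
and surrounding code trimmed):

	/* Root GT (intel_root_gt_init_early): allocate the one real lock. */
	gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
	if (!gt->irq_lock)
		return -ENOMEM;

	/* Media GT (intel_sa_mediagt_setup): alias the root GT's lock. */
	gt->irq_lock = to_gt(i915)->irq_lock;

	/* Every user then takes the lock through the pointer. */
	spin_lock(gt->irq_lock);

so locking for both GTs is serialized on a single spinlock.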

v2:
 - Point media's gt->irq_lock at the primary GT lock properly.  (Daniele)
 - Fix jump target for intel_root_gt_init_early errors.  (Daniele)

Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220906234934.3655440-14-matthew.d.roper@intel.com
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
17 files changed:
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt.h
drivers/gpu/drm/i915/gt/intel_gt_irq.c
drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
drivers/gpu/drm/i915/gt/intel_gt_types.h
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/intel_sa_media.c
drivers/gpu/drm/i915/gt/uc/intel_guc.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/i915/i915_driver.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/pxp/intel_pxp.c
drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
drivers/gpu/drm/i915/pxp/intel_pxp_session.c

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 41acc285e8bf6cbd1cef9045e3acc58e3e7ef1e6..6e0122b3dca200077d459eb5e5136490bcf19f24 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1688,9 +1688,9 @@ bool intel_engine_irq_enable(struct intel_engine_cs *engine)
                return false;
 
        /* Caller disables interrupts */
-       spin_lock(&engine->gt->irq_lock);
+       spin_lock(engine->gt->irq_lock);
        engine->irq_enable(engine);
-       spin_unlock(&engine->gt->irq_lock);
+       spin_unlock(engine->gt->irq_lock);
 
        return true;
 }
@@ -1701,9 +1701,9 @@ void intel_engine_irq_disable(struct intel_engine_cs *engine)
                return;
 
        /* Caller disables interrupts */
-       spin_lock(&engine->gt->irq_lock);
+       spin_lock(engine->gt->irq_lock);
        engine->irq_disable(engine);
-       spin_unlock(&engine->gt->irq_lock);
+       spin_unlock(engine->gt->irq_lock);
 }
 
 void intel_engines_reset_default_submission(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 2189ea1d302e4a448e1e911e9c0d68906b5d2a4b..b079af8fc57453de970594b275ca48b299ca2bbf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -37,7 +37,7 @@
 
 void intel_gt_common_init_early(struct intel_gt *gt)
 {
-       spin_lock_init(&gt->irq_lock);
+       spin_lock_init(gt->irq_lock);
 
        INIT_LIST_HEAD(&gt->closed_vma);
        spin_lock_init(&gt->closed_lock);
@@ -58,14 +58,19 @@ void intel_gt_common_init_early(struct intel_gt *gt)
 }
 
 /* Preliminary initialization of Tile 0 */
-void intel_root_gt_init_early(struct drm_i915_private *i915)
+int intel_root_gt_init_early(struct drm_i915_private *i915)
 {
        struct intel_gt *gt = to_gt(i915);
 
        gt->i915 = i915;
        gt->uncore = &i915->uncore;
+       gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
+       if (!gt->irq_lock)
+               return -ENOMEM;
 
        intel_gt_common_init_early(gt);
+
+       return 0;
 }
 
 static int intel_gt_probe_lmem(struct intel_gt *gt)
@@ -782,12 +787,18 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
 
        if (!gt_is_root(gt)) {
                struct intel_uncore *uncore;
+               spinlock_t *irq_lock;
 
                uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
                if (!uncore)
                        return -ENOMEM;
 
+               irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+               if (!irq_lock)
+                       return -ENOMEM;
+
                gt->uncore = uncore;
+               gt->irq_lock = irq_lock;
 
                intel_gt_common_init_early(gt);
        }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index c9a359f35d0ff03aca3eb6d6e7fb041f4bffaadb..2ee582e287c8dd7d698d695fd96a77f9f67ad3ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -45,7 +45,7 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
 }
 
 void intel_gt_common_init_early(struct intel_gt *gt);
-void intel_root_gt_init_early(struct drm_i915_private *i915);
+int intel_root_gt_init_early(struct drm_i915_private *i915);
 int intel_gt_assign_ggtt(struct intel_gt *gt);
 int intel_gt_init_mmio(struct intel_gt *gt);
 int __must_check intel_gt_init_hw(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 3a72d4fd0214e41fa60924b45697265b8143b1bd..0dfd0c42d00da81e3537fdd674c7bd8d443510a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -29,7 +29,7 @@ gen11_gt_engine_identity(struct intel_gt *gt,
        u32 timeout_ts;
        u32 ident;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
 
@@ -120,7 +120,7 @@ gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
        unsigned long intr_dw;
        unsigned int bit;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
 
@@ -138,14 +138,14 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
 {
        unsigned int bank;
 
-       spin_lock(&gt->irq_lock);
+       spin_lock(gt->irq_lock);
 
        for (bank = 0; bank < 2; bank++) {
                if (master_ctl & GEN11_GT_DW_IRQ(bank))
                        gen11_gt_bank_handler(gt, bank);
        }
 
-       spin_unlock(&gt->irq_lock);
+       spin_unlock(gt->irq_lock);
 }
 
 bool gen11_gt_reset_one_iir(struct intel_gt *gt,
@@ -154,7 +154,7 @@ bool gen11_gt_reset_one_iir(struct intel_gt *gt,
        void __iomem * const regs = gt->uncore->regs;
        u32 dw;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
@@ -310,9 +310,9 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
        if (!HAS_L3_DPF(gt->i915))
                return;
 
-       spin_lock(&gt->irq_lock);
+       spin_lock(gt->irq_lock);
        gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
-       spin_unlock(&gt->irq_lock);
+       spin_unlock(gt->irq_lock);
 
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                gt->i915->l3_parity.which_slice |= 1 << 1;
@@ -434,7 +434,7 @@ static void gen5_gt_update_irq(struct intel_gt *gt,
                               u32 interrupt_mask,
                               u32 enabled_irq_mask)
 {
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
index 11060f5a4c8984c8aa98f0f598410d243a4335f6..52f2a28b2058e812bba5263a9fcd724b1f449db2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -37,7 +37,7 @@ static void gen6_gt_pm_update_irq(struct intel_gt *gt,
 
        WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        new_val = gt->pm_imr;
        new_val &= ~interrupt_mask;
@@ -64,7 +64,7 @@ void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
        struct intel_uncore *uncore = gt->uncore;
        i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        intel_uncore_write(uncore, reg, reset_mask);
        intel_uncore_write(uncore, reg, reset_mask);
@@ -92,7 +92,7 @@ static void write_pm_ier(struct intel_gt *gt)
 
 void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
 {
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        gt->pm_ier |= enable_mask;
        write_pm_ier(gt);
@@ -101,7 +101,7 @@ void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
 
 void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
 {
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        gt->pm_ier &= ~disable_mask;
        gen6_gt_pm_mask_irq(gt, disable_mask);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 726695936a794683167711f7cd385def03c2800c..184ee9b11a4dac78508c8a24585bcc3a02dfef24 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -163,7 +163,7 @@ struct intel_gt {
        struct intel_rc6 rc6;
        struct intel_rps rps;
 
-       spinlock_t irq_lock;
+       spinlock_t *irq_lock;
        u32 gt_imr;
        u32 pm_ier;
        u32 pm_imr;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 6fadde4ee7bf76abf317c145c74362b797dadd5c..6b86250c31ab56c44dcbd7cee0e2599defe300a8 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -194,9 +194,9 @@ static void rps_enable_interrupts(struct intel_rps *rps)
 
        rps_reset_ei(rps);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen6_gt_pm_enable_irq(gt, rps->pm_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        intel_uncore_write(gt->uncore,
                           GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
@@ -217,14 +217,14 @@ static void rps_reset_interrupts(struct intel_rps *rps)
 {
        struct intel_gt *gt = rps_to_gt(rps);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        if (GRAPHICS_VER(gt->i915) >= 11)
                gen11_rps_reset_interrupts(rps);
        else
                gen6_rps_reset_interrupts(rps);
 
        rps->pm_iir = 0;
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void rps_disable_interrupts(struct intel_rps *rps)
@@ -234,9 +234,9 @@ static void rps_disable_interrupts(struct intel_rps *rps)
        intel_uncore_write(gt->uncore,
                           GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        intel_synchronize_irq(gt->i915);
 
@@ -1797,10 +1797,10 @@ static void rps_work(struct work_struct *work)
        int new_freq, adj, min, max;
        u32 pm_iir = 0;
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
        client_boost = atomic_read(&rps->num_waiters);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        /* Make sure we didn't queue anything we're not going to process. */
        if (!pm_iir && !client_boost)
@@ -1873,9 +1873,9 @@ static void rps_work(struct work_struct *work)
        mutex_unlock(&rps->lock);
 
 out:
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen6_gt_pm_unmask_irq(gt, rps->pm_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
@@ -1883,7 +1883,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
        struct intel_gt *gt = rps_to_gt(rps);
        const u32 events = rps->pm_events & pm_iir;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        if (unlikely(!events))
                return;
@@ -1903,7 +1903,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
 
        events = pm_iir & rps->pm_events;
        if (events) {
-               spin_lock(&gt->irq_lock);
+               spin_lock(gt->irq_lock);
 
                GT_TRACE(gt, "irq events:%x\n", events);
 
@@ -1911,7 +1911,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
                rps->pm_iir |= events;
 
                schedule_work(&rps->work);
-               spin_unlock(&gt->irq_lock);
+               spin_unlock(gt->irq_lock);
        }
 
        if (GRAPHICS_VER(gt->i915) >= 8)
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
index 8c5c519457cc254b247cac5e882988568ac03d6d..5516e9c363a48ecbf3cda7eb5738c8ceb3c6b029 100644
--- a/drivers/gpu/drm/i915/gt/intel_sa_media.c
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -21,6 +21,7 @@ int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
 
        uncore->gsi_offset = gsi_offset;
 
+       gt->irq_lock = to_gt(i915)->irq_lock;
        intel_gt_common_init_early(gt);
        intel_uncore_init_early(uncore, gt);
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 24451d000a6a615ad848fc96014f93e8d89913d8..bac06e3d6f2cc9cd40360763d46eccbc2d5ef39e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -82,9 +82,9 @@ static void gen9_reset_guc_interrupts(struct intel_guc *guc)
 
        assert_rpm_wakelock_held(&gt->i915->runtime_pm);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void gen9_enable_guc_interrupts(struct intel_guc *guc)
@@ -93,11 +93,11 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
 
        assert_rpm_wakelock_held(&gt->i915->runtime_pm);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
                     gt->pm_guc_events);
        gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void gen9_disable_guc_interrupts(struct intel_guc *guc)
@@ -106,11 +106,11 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
 
        assert_rpm_wakelock_held(&gt->i915->runtime_pm);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
 
        gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
 
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
        intel_synchronize_irq(gt->i915);
 
        gen9_reset_guc_interrupts(guc);
@@ -120,9 +120,9 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc)
 {
        struct intel_gt *gt = guc_to_gt(guc);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void gen11_enable_guc_interrupts(struct intel_guc *guc)
@@ -130,25 +130,25 @@ static void gen11_enable_guc_interrupts(struct intel_guc *guc)
        struct intel_gt *gt = guc_to_gt(guc);
        u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
        intel_uncore_write(gt->uncore,
                           GEN11_GUC_SG_INTR_ENABLE, events);
        intel_uncore_write(gt->uncore,
                           GEN11_GUC_SG_INTR_MASK, ~events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static void gen11_disable_guc_interrupts(struct intel_guc *guc)
 {
        struct intel_gt *gt = guc_to_gt(guc);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
 
        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
 
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
        intel_synchronize_irq(gt->i915);
 
        gen11_reset_guc_interrupts(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 707c2253e96aca929382ba9b7e88bf96d24e08d3..22ba66e48a9b01bacd0edd16bd05df59eb5ab99b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1537,8 +1537,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
        __reset_guc_busyness_stats(guc);
 
        /* Flush IRQ handler */
-       spin_lock_irq(&guc_to_gt(guc)->irq_lock);
-       spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
+       spin_lock_irq(guc_to_gt(guc)->irq_lock);
+       spin_unlock_irq(guc_to_gt(guc)->irq_lock);
 
        guc_flush_submissions(guc);
        guc_flush_destroyed_contexts(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index abf4e142596d055a44249bf6aba01dd12ed3426e..dbd048b77e193be9772d83701c717ed554461356 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -245,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
        intel_guc_enable_interrupts(guc);
 
        /* check for CT messages received before we enabled interrupts */
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        intel_guc_ct_event_handler(&guc->ct);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        drm_dbg(&i915->drm, "GuC communication enabled\n");
 
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 07f51a973783163afc5c2ea78ae20c349c795cd8..01f42777b6e32f347f8816cf219337fbd08d923d 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -370,7 +370,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
 
        intel_wopcm_init_early(&dev_priv->wopcm);
 
-       intel_root_gt_init_early(dev_priv);
+       ret = intel_root_gt_init_early(dev_priv);
+       if (ret < 0)
+               goto err_rootgt;
 
        i915_drm_clients_init(&dev_priv->clients, dev_priv);
 
@@ -395,6 +397,7 @@ err_gem:
        i915_gem_cleanup_early(dev_priv);
        intel_gt_driver_late_release_all(dev_priv);
        i915_drm_clients_fini(&dev_priv->clients);
+err_rootgt:
        intel_region_ttm_device_fini(dev_priv);
 err_ttm:
        vlv_suspend_cleanup(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 73cebc6aa65072040408a7291b335451e1956873..5652acdcf910485d08d12bb6d7515c9c2b4b028e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1104,9 +1104,9 @@ static void ivb_parity_work(struct work_struct *work)
 
 out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        mutex_unlock(&dev_priv->drm.struct_mutex);
 }
index 17109c513259a0c890bb40beeac97ffabd489837..69cdaaddc4a9067192e8550a1f4d0c850da7e4e7 100644 (file)
@@ -169,11 +169,11 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
         * We want to get the same effect as if we received a termination
         * interrupt, so just pretend that we did.
         */
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        intel_pxp_mark_termination_in_progress(pxp);
        pxp->session_events |= PXP_TERMINATION_REQUEST;
        queue_work(system_unbound_wq, &pxp->session_work);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 static bool pxp_component_bound(struct intel_pxp *pxp)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index c9da1015eb4262fd1b9f418f658af20fd61e1dca..b2d1a0f9e7af5bb6a3c29b7ef8a57da2c792831d 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -46,9 +46,9 @@ static int pxp_terminate_set(void *data, u64 val)
                return -ENODEV;
 
        /* simulate a termination interrupt */
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        if (!wait_for_completion_timeout(&pxp->termination,
                                         msecs_to_jiffies(100)))
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 04745f9144074dc8e270608d75bf813aa2ab015c..c28be430718aebb71983775c3f300ace84abd982 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -25,7 +25,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
        if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
                return;
 
-       lockdep_assert_held(&gt->irq_lock);
+       lockdep_assert_held(gt->irq_lock);
 
        if (unlikely(!iir))
                return;
@@ -55,16 +55,16 @@ static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
 
 static inline void pxp_irq_reset(struct intel_gt *gt)
 {
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 void intel_pxp_irq_enable(struct intel_pxp *pxp)
 {
        struct intel_gt *gt = pxp_to_gt(pxp);
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
 
        if (!pxp->irq_enabled)
                WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
@@ -72,7 +72,7 @@ void intel_pxp_irq_enable(struct intel_pxp *pxp)
        __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
        pxp->irq_enabled = true;
 
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 }
 
 void intel_pxp_irq_disable(struct intel_pxp *pxp)
@@ -88,12 +88,12 @@ void intel_pxp_irq_disable(struct intel_pxp *pxp)
         */
        GEM_WARN_ON(intel_pxp_is_active(pxp));
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
 
        pxp->irq_enabled = false;
        __pxp_set_interrupts(gt, 0);
 
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
        intel_synchronize_irq(gt->i915);
 
        pxp_irq_reset(gt);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 92b00b4de240f4801fa460bd25dad1669ee4fe8f..1bb5b5249157f1e939ea64af98adfe3a7cb07c68 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -144,9 +144,9 @@ void intel_pxp_session_work(struct work_struct *work)
        intel_wakeref_t wakeref;
        u32 events = 0;
 
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        events = fetch_and_zero(&pxp->session_events);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
 
        if (!events)
                return;