drm/xe: Fix lockdep warning in xe_force_wake calls
authorAravind Iddamsetty <aravind.iddamsetty@linux.intel.com>
Wed, 6 Dec 2023 09:33:27 +0000 (15:03 +0530)
committerRodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 21 Dec 2023 16:46:08 +0000 (11:46 -0500)
Use spin_lock_irqsave, spin_unlock_irqrestore

Fix for below:
[13994.811263] ========================================================
[13994.811295] WARNING: possible irq lock inversion dependency detected
[13994.811326] 6.6.0-rc3-xe #2 Tainted: G     U
[13994.811358] --------------------------------------------------------
[13994.811388] swapper/0/0 just changed the state of lock:
[13994.811416] ffff895c7e044db8 (&cpuctx_lock){-...}-{2:2}, at:
__perf_event_read+0xb7/0x3a0
[13994.811494] but this lock took another, HARDIRQ-unsafe lock in the
past:
[13994.811528]  (&fw->lock){+.+.}-{2:2}
[13994.811544]

               and interrupts could create inverse lock ordering between
them.

[13994.811606]
               other info that might help us debug this:
[13994.811636]  Possible interrupt unsafe locking scenario:

[13994.811667]        CPU0                    CPU1
[13994.811691]        ----                    ----
[13994.811715]   lock(&fw->lock);
[13994.811744]                                local_irq_disable();
[13994.811773]                                lock(&cpuctx_lock);
[13994.811810]                                lock(&fw->lock);
[13994.811846]   <Interrupt>
[13994.811865]     lock(&cpuctx_lock);
[13994.811895]
                *** DEADLOCK ***

v2: Use spin_lock in atomic context and spin_lock_irq in a non-atomic
context (Matthew Brost)

v3: just use spin_lock_irqsave/restore

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Anshuman Gupta <anshuman.gupta@intel.com>
Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Aravind Iddamsetty <aravind.iddamsetty@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_force_wake.c

index 32d6c4dd28079b12dce09074d1ee82560891ec3c..9bbe8a5040daef5df459d0cb8be631522dcfd55a 100644 (file)
@@ -145,9 +145,10 @@ int xe_force_wake_get(struct xe_force_wake *fw,
        struct xe_gt *gt = fw_to_gt(fw);
        struct xe_force_wake_domain *domain;
        enum xe_force_wake_domains tmp, woken = 0;
+       unsigned long flags;
        int ret, ret2 = 0;
 
-       spin_lock(&fw->lock);
+       spin_lock_irqsave(&fw->lock, flags);
        for_each_fw_domain_masked(domain, domains, fw, tmp) {
                if (!domain->ref++) {
                        woken |= BIT(domain->id);
@@ -162,7 +163,7 @@ int xe_force_wake_get(struct xe_force_wake *fw,
                                   domain->id, ret);
        }
        fw->awake_domains |= woken;
-       spin_unlock(&fw->lock);
+       spin_unlock_irqrestore(&fw->lock, flags);
 
        return ret2;
 }
@@ -174,9 +175,10 @@ int xe_force_wake_put(struct xe_force_wake *fw,
        struct xe_gt *gt = fw_to_gt(fw);
        struct xe_force_wake_domain *domain;
        enum xe_force_wake_domains tmp, sleep = 0;
+       unsigned long flags;
        int ret, ret2 = 0;
 
-       spin_lock(&fw->lock);
+       spin_lock_irqsave(&fw->lock, flags);
        for_each_fw_domain_masked(domain, domains, fw, tmp) {
                if (!--domain->ref) {
                        sleep |= BIT(domain->id);
@@ -191,7 +193,7 @@ int xe_force_wake_put(struct xe_force_wake *fw,
                                   domain->id, ret);
        }
        fw->awake_domains &= ~sleep;
-       spin_unlock(&fw->lock);
+       spin_unlock_irqrestore(&fw->lock, flags);
 
        return ret2;
 }