drm/panthor: Call panthor_sched_post_reset() even if the reset failed
author     Boris Brezillon <boris.brezillon@collabora.com>
           Thu, 2 May 2024 18:38:12 +0000 (20:38 +0200)
committer  Boris Brezillon <boris.brezillon@collabora.com>
           Mon, 13 May 2024 07:52:22 +0000 (09:52 +0200)
We need to undo what was done in panthor_sched_pre_reset() even if the
reset failed. When that happens, we simply flag all previously running
groups as terminated to unblock things.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240502183813.1612017-5-boris.brezillon@collabora.com
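
For reference, a condensed sketch of the reset path as it looks after this
change, assembled from the panthor_device.c hunk below; the local
declarations and the elided setup/error-reporting steps are reconstructed
for illustration only and are not part of the patch:

        static void panthor_device_reset_work(struct work_struct *work)
        {
                struct panthor_device *ptdev; /* obtained from @work in the real function */
                int ret, cookie;

                /* ... pre-reset quiescing of scheduler, FW and MMU elided ... */

                panthor_gpu_l2_power_on(ptdev);
                panthor_mmu_post_reset(ptdev);
                ret = panthor_fw_post_reset(ptdev);
                atomic_set(&ptdev->reset.pending, 0);

                /* Always undo panthor_sched_pre_reset(), even when the FW
                 * post-reset step failed; a non-zero ret makes the scheduler
                 * flag all previously running groups as terminated so waiters
                 * get unblocked.
                 */
                panthor_sched_post_reset(ptdev, ret != 0);
                drm_dev_exit(cookie);

                /* ... error reporting based on ret elided ... */
        }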
drivers/gpu/drm/panthor/panthor_device.c
drivers/gpu/drm/panthor/panthor_sched.c
drivers/gpu/drm/panthor/panthor_sched.h

index 4c5b54e7abb7b65b6c00391411b6ba05d72d436b..4082c8f2951dfdace7f73a24d6fe34e9e7f920eb 100644
@@ -129,13 +129,8 @@ static void panthor_device_reset_work(struct work_struct *work)
        panthor_gpu_l2_power_on(ptdev);
        panthor_mmu_post_reset(ptdev);
        ret = panthor_fw_post_reset(ptdev);
-       if (ret)
-               goto out_dev_exit;
-
        atomic_set(&ptdev->reset.pending, 0);
-       panthor_sched_post_reset(ptdev);
-
-out_dev_exit:
+       panthor_sched_post_reset(ptdev, ret != 0);
        drm_dev_exit(cookie);
 
        if (ret) {
index 9308596e0812267580359d25ea8ecab64c2761db..79ffcbc41d78e568d5394b6c47b0b7b871839288 100644
@@ -2733,15 +2733,22 @@ void panthor_sched_pre_reset(struct panthor_device *ptdev)
        mutex_unlock(&sched->reset.lock);
 }
 
-void panthor_sched_post_reset(struct panthor_device *ptdev)
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
 {
        struct panthor_scheduler *sched = ptdev->scheduler;
        struct panthor_group *group, *group_tmp;
 
        mutex_lock(&sched->reset.lock);
 
-       list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node)
+       list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
+               /* Consider all previously running groups as terminated if the
+                * reset failed.
+                */
+               if (reset_failed)
+                       group->state = PANTHOR_CS_GROUP_TERMINATED;
+
                panthor_group_start(group);
+       }
 
        /* We're done resetting the GPU, clear the reset.in_progress bit so we can
         * kick the scheduler.
@@ -2749,9 +2756,11 @@ void panthor_sched_post_reset(struct panthor_device *ptdev)
        atomic_set(&sched->reset.in_progress, false);
        mutex_unlock(&sched->reset.lock);
 
-       sched_queue_delayed_work(sched, tick, 0);
-
-       sched_queue_work(sched, sync_upd);
+       /* No need to queue a tick and update syncs if the reset failed. */
+       if (!reset_failed) {
+               sched_queue_delayed_work(sched, tick, 0);
+               sched_queue_work(sched, sync_upd);
+       }
 }
 
 static void group_sync_upd_work(struct work_struct *work)
index 66438b1f331f6915054e6842f22b9d99f1c84f97..3a30d2328b308df6eb5bf6204b07fa2ec33d8539 100644
@@ -40,7 +40,7 @@ void panthor_group_pool_destroy(struct panthor_file *pfile);
 int panthor_sched_init(struct panthor_device *ptdev);
 void panthor_sched_unplug(struct panthor_device *ptdev);
 void panthor_sched_pre_reset(struct panthor_device *ptdev);
-void panthor_sched_post_reset(struct panthor_device *ptdev);
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed);
 void panthor_sched_suspend(struct panthor_device *ptdev);
 void panthor_sched_resume(struct panthor_device *ptdev);