DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0;
 }
 
+bool xe_device_mem_access_ongoing(struct xe_device *xe)
+{
+       if (xe_pm_read_callback_task(xe) != NULL)
+               return true;
+
+       return atomic_read(&xe->mem_access.ref);
+}
+
+void xe_device_assert_mem_access(struct xe_device *xe)
+{
+       XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
+}
+
 bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
 {
-       return atomic_inc_not_zero(&xe->mem_access.ref);
+       bool active;
+
+       if (xe_pm_read_callback_task(xe) == current)
+               return true;
+
+       active = xe_pm_runtime_get_if_active(xe);
+       if (active) {
+               int ref = atomic_inc_return(&xe->mem_access.ref);
+
+               XE_WARN_ON(ref == S32_MAX);
+       }
+
+       return active;
 }
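
A minimal usage sketch of the new get_if_ongoing() semantics (illustrative only, not part of the diff; example_fast_handler() is hypothetical): a caller that must not wake a suspended device either pins the access for the duration of its work or skips it entirely, and the put side stays balanced because both helpers short-circuit when called from the pm callback task.

static void example_fast_handler(struct xe_device *xe)
{
	/*
	 * True if the device is awake (a mem_access ref was taken), or if we
	 * are the task currently running the runtime pm callback (no ref
	 * needed, and the matching put below is a no-op in that case).
	 */
	if (!xe_device_mem_access_get_if_ongoing(xe))
		return; /* device suspended; don't wake it from this path */

	/* ... touch registers / GGTT / VRAM ... */

	xe_device_mem_access_put(xe);
}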
 
 void xe_device_mem_access_get(struct xe_device *xe)
 {
-       bool resumed = xe_pm_runtime_resume_if_suspended(xe);
-       int ref = atomic_inc_return(&xe->mem_access.ref);
-
-       if (ref == 1)
-               xe->mem_access.hold_rpm = xe_pm_runtime_get_if_active(xe);
+       int ref;
+
+       /*
+        * This looks racy, but should be fine since the pm_callback_task only
+        * transitions from NULL -> current (and back to NULL again) during the
+        * runtime_resume() or runtime_suspend() callbacks, of which there can
+        * only be a single one running for our device. We only need to prevent
+        * recursive calls to runtime_get or runtime_put from those callbacks,
+        * as well as prevent triggering any access_ongoing asserts from within
+        * them.
+        */
+       if (xe_pm_read_callback_task(xe) == current)
+               return;
 
-       /* The usage counter increased if device was immediately resumed */
-       if (resumed)
-               xe_pm_runtime_put(xe);
+       xe_pm_runtime_get(xe);
+       ref = atomic_inc_return(&xe->mem_access.ref);
 
        XE_WARN_ON(ref == S32_MAX);
 }
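
For context, a hedged sketch of the recursion that the pm_callback_task check above prevents; the call chain is illustrative, with xe_pm_runtime_resume() and xe_bo_restore_kernel() taken from later in this patch and the reason the restore path needs memory access assumed:

/*
 * Without the check, a resume-time chain like the following would call back
 * into runtime pm from inside its own callback:
 *
 *   xe_pm_runtime_resume(xe)              pm_callback_task = current
 *     xe_bo_restore_kernel(xe)            needs to touch VRAM/GGTT
 *       xe_device_mem_access_get(xe)
 *         xe_pm_runtime_get(xe)           recursive rpm get from within the
 *                                         resume callback itself
 *
 * With the check, the inner xe_device_mem_access_get() sees that current is
 * the task running the pm callback and returns early.
 */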
 
 void xe_device_mem_access_put(struct xe_device *xe)
 {
-       bool hold = xe->mem_access.hold_rpm;
-       int ref = atomic_dec_return(&xe->mem_access.ref);
+       int ref;
+
+       if (xe_pm_read_callback_task(xe) == current)
+               return;
 
-       if (!ref && hold)
-               xe_pm_runtime_put(xe);
+       ref = atomic_dec_return(&xe->mem_access.ref);
+       xe_pm_runtime_put(xe);
 
        XE_WARN_ON(ref < 0);
 }
 
 bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe);
 void xe_device_mem_access_put(struct xe_device *xe);
 
-static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
-{
-       return atomic_read(&xe->mem_access.ref);
-}
-
-static inline void xe_device_assert_mem_access(struct xe_device *xe)
-{
-       XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
-}
+void xe_device_assert_mem_access(struct xe_device *xe);
+bool xe_device_mem_access_ongoing(struct xe_device *xe);
 
 static inline bool xe_device_in_fault_mode(struct xe_device *xe)
 {
 
        struct {
                /** @ref: ref count of memory accesses */
                atomic_t ref;
-               /** @hold_rpm: need to put rpm ref back at the end */
-               bool hold_rpm;
        } mem_access;
 
        /** @d3cold: Encapsulate d3cold related stuff */
                struct mutex lock;
        } d3cold;
 
+       /**
+        * @pm_callback_task: Track the active task that is running in either
+        * the runtime_suspend or the runtime_resume callback.
+        */
+       struct task_struct *pm_callback_task;
+
        /* For pcode */
        struct mutex sb_lock;
 
 
 #include "xe_guc.h"
 #include "xe_guc_submit.h"
 #include "xe_map.h"
+#include "xe_pm.h"
 #include "xe_trace.h"
 
 /* Used when a CT send wants to block and / or receive data */
 void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
 {
        struct xe_device *xe = ct_to_xe(ct);
+       bool ongoing;
        int len;
 
-       if (!xe_device_mem_access_get_if_ongoing(xe))
+       ongoing = xe_device_mem_access_get_if_ongoing(xe);
+       if (!ongoing && xe_pm_read_callback_task(xe) == NULL)
                return;
 
        spin_lock(&ct->fast_lock);
        } while (len > 0);
        spin_unlock(&ct->fast_lock);
 
-       xe_device_mem_access_put(xe);
+       if (ongoing)
+               xe_device_mem_access_put(xe);
 }
 
 /* Returns less than zero on error, 0 on done, 1 on more available */
 static void g2h_worker_func(struct work_struct *w)
 {
        struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
+       bool ongoing;
        int ret;
 
-       xe_device_mem_access_get(ct_to_xe(ct));
+       /*
+        * Normal users must always hold mem_access.ref around CT calls. However
+        * during the runtime pm callbacks we rely on CT to talk to the GuC, but
+        * at this stage we can't rely on mem_access.ref and even the
+        * callback_task will be different from current.  For such cases we just
+        * need to ensure we always process the responses from any blocking
+        * ct_send requests, or wherever we otherwise expect some response when
+        * initiated from those callbacks (which will need to wait for the below
+        * dequeue_one_g2h()).  The dequeue_one_g2h() will gracefully fail if
+        * the device has suspended to the point that the CT communication has
+        * been disabled.
+        *
+        * If we are inside the runtime pm callback, we can be the only task
+        * still issuing CT requests (since that requires having the
+        * mem_access.ref).  It seems like it might in theory be possible to
+        * receive unsolicited events from the GuC just as we are
+        * suspending-resuming, but those will currently be lost anyway when
+        * eventually exiting from suspend, hence no need to wake up the device
+        * here. If we ever need something stronger than get_if_ongoing() then
+        * we need to be careful not to block the pm callbacks from getting CT
+        * responses; if the worker here is itself blocked waiting for those
+        * callbacks to complete, we would deadlock.
+        */
+       ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+       if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
+               return;
+
        do {
                mutex_lock(&ct->lock);
                ret = dequeue_one_g2h(ct);
                        kick_reset(ct);
                }
        } while (ret == 1);
-       xe_device_mem_access_put(ct_to_xe(ct));
+
+       if (ongoing)
+               xe_device_mem_access_put(ct_to_xe(ct));
 }
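
As a companion to the comment above, a hedged sketch of the flow that requires this worker to keep running while mem_access.ref is zero; xe_gt_suspend() and the blocking ct_send are named elsewhere in this patch, the intermediate steps are assumed:

/*
 * Illustrative only: a blocking CT request issued from within the
 * runtime_suspend callback still needs its G2H response processed here,
 * even though nobody holds mem_access.ref at that point:
 *
 *   xe_pm_runtime_suspend(xe)             pm_callback_task = current
 *     xe_gt_suspend(gt)
 *       ... blocking ct_send(), waits for a G2H response ...
 *
 *   g2h_worker_func()                     mem_access.ref == 0, ongoing is
 *     dequeue_one_g2h(ct)                 false, but pm_callback_task is
 *                                         non-NULL, so we still process it
 */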
 
 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
 
        pm_runtime_forbid(dev);
 }
 
+static void xe_pm_write_callback_task(struct xe_device *xe,
+                                     struct task_struct *task)
+{
+       WRITE_ONCE(xe->pm_callback_task, task);
+
+       /*
+        * Just in case it's somehow possible for our writes to be reordered to
+        * the extent that something else re-uses the task written in
+        * pm_callback_task. For example after returning from the callback, but
+        * before the reordered write that resets pm_callback_task back to NULL.
+        */
+       smp_mb(); /* pairs with xe_pm_read_callback_task */
+}
+
+struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
+{
+       smp_mb(); /* pairs with xe_pm_write_callback_task */
+
+       return READ_ONCE(xe->pm_callback_task);
+}
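
A hedged illustration of the reordering hazard the paired smp_mb() calls are defending against (the interleaving is illustrative only):

/*
 * Illustrative only: if the NULL reset of pm_callback_task could be delayed
 * past the point where the callback's task_struct is re-used, an unrelated
 * task might later match the stale pointer and skip its rpm/ref handling:
 *
 *   CPU0 (pm callback)                      CPU1 (later task that happens to
 *                                           re-use the same task_struct)
 *   xe_pm_write_callback_task(xe, current)
 *   ... runtime_suspend()/resume() body ...
 *   return from callback
 *   [reordered NULL reset still pending]    xe_device_mem_access_get(xe)
 *                                             read_callback_task() == current?
 *
 * The paired barriers are there to rule this out, as the comment in
 * xe_pm_write_callback_task() explains.
 */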
+
 int xe_pm_runtime_suspend(struct xe_device *xe)
 {
        struct xe_gt *gt;
        u8 id;
-       int err;
+       int err = 0;
 
-       if (xe->d3cold.allowed) {
-               if (xe_device_mem_access_ongoing(xe))
-                       return -EBUSY;
+       if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
+               return -EBUSY;
+
+       /* Disable access_ongoing asserts and prevent recursive pm calls */
+       xe_pm_write_callback_task(xe, current);
 
+       if (xe->d3cold.allowed) {
                err = xe_bo_evict_all(xe);
                if (err)
-                       return err;
+                       goto out;
        }
 
        for_each_gt(gt, xe, id) {
                err = xe_gt_suspend(gt);
                if (err)
-                       return err;
+                       goto out;
        }
 
        xe_irq_suspend(xe);
-
-       return 0;
+out:
+       xe_pm_write_callback_task(xe, NULL);
+       return err;
 }
 
 int xe_pm_runtime_resume(struct xe_device *xe)
 {
        struct xe_gt *gt;
        u8 id;
-       int err;
+       int err = 0;
+
+       /* Disable access_ongoing asserts and prevent recursive pm calls */
+       xe_pm_write_callback_task(xe, current);
 
        /*
         * It can be possible that xe has allowed d3cold but other pcie devices
                for_each_gt(gt, xe, id) {
                        err = xe_pcode_init(gt);
                        if (err)
-                               return err;
+                               goto out;
                }
 
                /*
                 */
                err = xe_bo_restore_kernel(xe);
                if (err)
-                       return err;
+                       goto out;
        }
 
        xe_irq_resume(xe);
        if (xe->d3cold.allowed && xe->d3cold.power_lost) {
                err = xe_bo_restore_user(xe);
                if (err)
-                       return err;
+                       goto out;
        }
-
-       return 0;
+out:
+       xe_pm_write_callback_task(xe, NULL);
+       return err;
 }
 
 int xe_pm_runtime_get(struct xe_device *xe)
        return pm_runtime_put_autosuspend(xe->drm.dev);
 }
 
-/* Return true if resume operation happened and usage count was increased */
-bool xe_pm_runtime_resume_if_suspended(struct xe_device *xe)
-{
-       /* In case we are suspended we need to immediately wake up */
-       if (pm_runtime_suspended(xe->drm.dev))
-               return !pm_runtime_resume_and_get(xe->drm.dev);
-
-       return false;
-}
-
 int xe_pm_runtime_get_if_active(struct xe_device *xe)
 {
-       WARN_ON(pm_runtime_suspended(xe->drm.dev));
        return pm_runtime_get_if_active(xe->drm.dev, true);
 }
 
 
 int xe_pm_runtime_resume(struct xe_device *xe);
 int xe_pm_runtime_get(struct xe_device *xe);
 int xe_pm_runtime_put(struct xe_device *xe);
-bool xe_pm_runtime_resume_if_suspended(struct xe_device *xe);
 int xe_pm_runtime_get_if_active(struct xe_device *xe);
 void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
 
 #endif