drm/i915: Extract active engine lookup to a helper
author Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Wed, 24 Mar 2021 12:13:29 +0000 (12:13 +0000)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 25 Mar 2021 23:48:08 +0000 (00:48 +0100)
Move the active engine lookup into the exported helper i915_request_active_engine().

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
[danvet: Slight rebase, engine->sched.lock is still called
engine->active.lock.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210324121335.2307063-2-tvrtko.ursulin@linux.intel.com
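
For illustration only (not part of the patch): a minimal sketch of how a caller
uses the newly exported helper, modelled on the updated active_engine() loop in
i915_gem_context.c below; the request rq and the engine pulse are assumptions
borrowed from that caller, not new API. A standalone sketch of the lock-chasing
pattern the helper relies on follows the diff.

	struct intel_engine_cs *engine = NULL;
	bool found;

	/* Ask the backend which engine, if any, the request is running on. */
	found = i915_request_active_engine(rq, &engine);
	if (found && engine)
		intel_engine_pulse(engine); /* e.g. preempt it, as __cancel_engine() does */
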
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h

index 4d2f40cf237bd455fa90f6d28375e8deb30e8b64..9b031d88046f6d1971535942dfc9264863d205d7 100644 (file)
@@ -386,38 +386,6 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
        return intel_engine_pulse(engine) == 0;
 }
 
-static bool
-__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
-{
-       struct intel_engine_cs *engine, *locked;
-       bool ret = false;
-
-       /*
-        * Serialise with __i915_request_submit() so that it sees
-        * is-banned?, or we know the request is already inflight.
-        *
-        * Note that rq->engine is unstable, and so we double
-        * check that we have acquired the lock on the final engine.
-        */
-       locked = READ_ONCE(rq->engine);
-       spin_lock_irq(&locked->active.lock);
-       while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
-               spin_unlock(&locked->active.lock);
-               locked = engine;
-               spin_lock(&locked->active.lock);
-       }
-
-       if (i915_request_is_active(rq)) {
-               if (!__i915_request_is_complete(rq))
-                       *active = locked;
-               ret = true;
-       }
-
-       spin_unlock_irq(&locked->active.lock);
-
-       return ret;
-}
-
 static struct intel_engine_cs *active_engine(struct intel_context *ce)
 {
        struct intel_engine_cs *engine = NULL;
@@ -445,7 +413,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
                /* Check with the backend if the request is inflight */
                found = true;
                if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
-                       found = __active_engine(rq, &engine);
+                       found = i915_request_active_engine(rq, &engine);
 
                i915_request_put(rq);
                if (found)
index 021535f2a718245f1cb8a934e51a53c4ae079a72..d23186016fc6e769909378f872dcff43bdee36e9 100644 (file)
@@ -244,6 +244,49 @@ static void __i915_request_fill(struct i915_request *rq, u8 val)
        memset(vaddr + head, val, rq->postfix - head);
 }
 
+/**
+ * i915_request_active_engine - return the engine a request is running on
+ * @rq: request to inspect
+ * @active: pointer in which to return the active engine
+ *
+ * Fills @active with the currently active engine if the request is active
+ * and not yet completed.
+ *
+ * Returns true if the request was active, false otherwise.
+ */
+bool
+i915_request_active_engine(struct i915_request *rq,
+                          struct intel_engine_cs **active)
+{
+       struct intel_engine_cs *engine, *locked;
+       bool ret = false;
+
+       /*
+        * Serialise with __i915_request_submit() so that it sees
+        * is-banned?, or we know the request is already inflight.
+        *
+        * Note that rq->engine is unstable, and so we double
+        * check that we have acquired the lock on the final engine.
+        */
+       locked = READ_ONCE(rq->engine);
+       spin_lock_irq(&locked->active.lock);
+       while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+               spin_unlock(&locked->active.lock);
+               locked = engine;
+               spin_lock(&locked->active.lock);
+       }
+
+       if (i915_request_is_active(rq)) {
+               if (!__i915_request_is_complete(rq))
+                       *active = locked;
+               ret = true;
+       }
+
+       spin_unlock_irq(&locked->active.lock);
+
+       return ret;
+}
+
 static void remove_from_engine(struct i915_request *rq)
 {
        struct intel_engine_cs *engine, *locked;
index ce773c0336421a06497915e8459c740b4f3b7e10..cf4bd07f749e72a178b8c09dc20818a44d30405d 100644 (file)
@@ -627,4 +627,8 @@ i915_request_active_seqno(const struct i915_request *rq)
        return hwsp_phys_base + hwsp_relative_offset;
 }
 
+bool
+i915_request_active_engine(struct i915_request *rq,
+                          struct intel_engine_cs **active);
+
 #endif /* I915_REQUEST_H */
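
Side note, for readers of the locking comment in the new helper: rq->engine may
be re-pointed (e.g. by the virtual engine machinery) while we wait on the old
engine's lock, so the lock has to be re-validated after it is taken. A generic,
self-contained sketch of that lock-chasing pattern, using hypothetical types
rather than the i915 ones, is below.

	#include <linux/compiler.h>
	#include <linux/spinlock.h>

	struct owner {
		spinlock_t lock;
	};

	struct item {
		struct owner *owner;	/* re-pointed only while owner->lock is held */
	};

	/* Take the lock of whatever owner the item settles on; caller unlocks. */
	static struct owner *lock_stable_owner(struct item *it)
	{
		struct owner *locked, *owner;

		locked = READ_ONCE(it->owner);
		spin_lock(&locked->lock);
		while (unlikely(locked != (owner = READ_ONCE(it->owner)))) {
			/* The item migrated while we were spinning; chase the new owner. */
			spin_unlock(&locked->lock);
			locked = owner;
			spin_lock(&locked->lock);
		}

		return locked;
	}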