drm/xe/guc: Use GuC ID Manager in submission code
author Michal Wajdeczko <michal.wajdeczko@intel.com>
Wed, 13 Mar 2024 22:11:12 +0000 (23:11 +0100)
committer Michal Wajdeczko <michal.wajdeczko@intel.com>
Wed, 27 Mar 2024 19:19:29 +0000 (20:19 +0100)
We are ready to replace the private guc_ids management code with the
separate GuC ID Manager, which can be shared with the upcoming SR-IOV
PF provisioning code.
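
A minimal sketch of the resulting allocation/release flow, assuming the
xe_guc_id_mgr_* interface used in the diff below; example_alloc_guc_id()
and example_release_guc_id() are illustrative wrappers, not functions in
the driver:

  /* caller must hold guc->submission_state.lock */
  static int example_alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
  {
          int ret;

          /* reserve q->width consecutive guc_ids from the shared manager */
          ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
                                             q->width);
          if (ret < 0)
                  return ret;

          /* first id of the reserved contiguous range */
          q->guc->id = ret;
          return 0;
  }

  /* caller must hold guc->submission_state.lock */
  static void example_release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
  {
          /* return the whole contiguous range in one call */
          xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
                                       q->guc->id, q->width);
  }

Since single- and multi-lrc queues now go through the same manager, the
MLRC/SLRC split and the GUC_ID_START_MLRC offset are no longer needed.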

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240313221112.1089-5-michal.wajdeczko@intel.com
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_guc_types.h

index 03c1676243408395a449b1e4fd4c5a40c03ce319..13b7e195c7b57a5df8d6163738c6f43c78aa2227 100644 (file)
@@ -27,6 +27,7 @@
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
 #include "xe_guc_exec_queue_types.h"
+#include "xe_guc_id_mgr.h"
 #include "xe_guc_submit_types.h"
 #include "xe_hw_engine.h"
 #include "xe_hw_fence.h"
@@ -236,16 +237,10 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
        struct xe_guc *guc = arg;
 
        xa_destroy(&guc->submission_state.exec_queue_lookup);
-       ida_destroy(&guc->submission_state.guc_ids);
-       bitmap_free(guc->submission_state.guc_ids_bitmap);
        free_submit_wq(guc);
        mutex_destroy(&guc->submission_state.lock);
 }
 
-#define GUC_ID_NUMBER_MLRC     4096
-#define GUC_ID_NUMBER_SLRC     (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
-#define GUC_ID_START_MLRC      GUC_ID_NUMBER_SLRC
-
 static const struct xe_exec_queue_ops guc_exec_queue_ops;
 
 static void primelockdep(struct xe_guc *guc)
@@ -268,22 +263,14 @@ int xe_guc_submit_init(struct xe_guc *guc)
        struct xe_gt *gt = guc_to_gt(guc);
        int err;
 
-       guc->submission_state.guc_ids_bitmap =
-               bitmap_zalloc(GUC_ID_NUMBER_MLRC, GFP_KERNEL);
-       if (!guc->submission_state.guc_ids_bitmap)
-               return -ENOMEM;
-
        err = alloc_submit_wq(guc);
-       if (err) {
-               bitmap_free(guc->submission_state.guc_ids_bitmap);
+       if (err)
                return err;
-       }
 
        gt->exec_queue_ops = &guc_exec_queue_ops;
 
        mutex_init(&guc->submission_state.lock);
        xa_init(&guc->submission_state.exec_queue_lookup);
-       ida_init(&guc->submission_state.guc_ids);
 
        spin_lock_init(&guc->submission_state.suspend.lock);
        guc->submission_state.suspend.context = dma_fence_context_alloc(1);
@@ -294,6 +281,10 @@ int xe_guc_submit_init(struct xe_guc *guc)
        if (err)
                return err;
 
+       err = xe_guc_id_mgr_init(&guc->submission_state.idm, ~0);
+       if (err)
+               return err;
+
        return 0;
 }
 
@@ -306,12 +297,8 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
        for (i = 0; i < xa_count; ++i)
                xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
 
-       if (xe_exec_queue_is_parallel(q))
-               bitmap_release_region(guc->submission_state.guc_ids_bitmap,
-                                     q->guc->id - GUC_ID_START_MLRC,
-                                     order_base_2(q->width));
-       else
-               ida_free(&guc->submission_state.guc_ids, q->guc->id);
+       xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
+                                    q->guc->id, q->width);
 }
 
 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -329,21 +316,12 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
         */
        lockdep_assert_held(&guc->submission_state.lock);
 
-       if (xe_exec_queue_is_parallel(q)) {
-               void *bitmap = guc->submission_state.guc_ids_bitmap;
-
-               ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
-                                             order_base_2(q->width));
-       } else {
-               ret = ida_alloc_max(&guc->submission_state.guc_ids,
-                                   GUC_ID_NUMBER_SLRC - 1, GFP_NOWAIT);
-       }
+       ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
+                                          q->width);
        if (ret < 0)
                return ret;
 
        q->guc->id = ret;
-       if (xe_exec_queue_is_parallel(q))
-               q->guc->id += GUC_ID_START_MLRC;
 
        for (i = 0; i < q->width; ++i) {
                ptr = xa_store(&guc->submission_state.exec_queue_lookup,
index 69be1fb830474899aca7e8a50a815c816773fc97..82bd93f7867d1344ff477571e9192bba921207bb 100644 (file)
@@ -68,10 +68,6 @@ struct xe_guc {
                struct xe_guc_id_mgr idm;
                /** @submission_state.exec_queue_lookup: Lookup an xe_engine from guc_id */
                struct xarray exec_queue_lookup;
-               /** @submission_state.guc_ids: used to allocate new guc_ids, single-lrc */
-               struct ida guc_ids;
-               /** @submission_state.guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
-               unsigned long *guc_ids_bitmap;
                /** @submission_state.stopped: submissions are stopped */
                atomic_t stopped;
                /** @submission_state.lock: protects submission state */