drm/xe: Let primary and media GT share a kernel_bb_pool
authorMatt Roper <matthew.d.roper@intel.com>
Mon, 10 Apr 2023 20:02:29 +0000 (13:02 -0700)
committerRodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 19 Dec 2023 23:31:40 +0000 (18:31 -0500)
The media GT requires a valid gt->kernel_bb_pool during driver probe to
allocate the WA and NOOP batchbuffers used to record default context
images.  Dynamically allocate the bb_pools so that the primary and media
GT can use the same pool during driver init.

The media GT still shouldn't need the USM pool, so only hook up the
kernel_bb_pool for now.

Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20230410200229.2726648-1-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_bb.c
drivers/gpu/drm/xe/xe_gt.c
drivers/gpu/drm/xe/xe_gt_debugfs.c
drivers/gpu/drm/xe/xe_gt_types.h
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_sa.c
drivers/gpu/drm/xe/xe_sa.h

index 7172801ee57053a9fe60ec18fa0ff147f985597e..3deb2d55f42184dc47d3fd870339fc7b524b1f92 100644 (file)
@@ -42,7 +42,7 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
         * space to accomodate the platform-specific hardware prefetch
         * requirements.
         */
-       bb->bo = xe_sa_bo_new(!usm ? &gt->kernel_bb_pool : &gt->usm.bb_pool,
+       bb->bo = xe_sa_bo_new(!usm ? gt->kernel_bb_pool : gt->usm.bb_pool,
                              4 * (dwords + 1) + bb_prefetch(gt));
        if (IS_ERR(bb->bo)) {
                err = PTR_ERR(bb->bo);
index daaf93e23bbfc4ed6e8c50714588571a8bc21094..4186f7f0d42f58201f02f0e20dc1b129ed60a168 100644 (file)
@@ -137,7 +137,7 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
        if (IS_ERR(bb))
                return PTR_ERR(bb);
 
-       batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool.bo);
+       batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool->bo);
        job = xe_bb_create_wa_job(e, bb, batch_ofs);
        if (IS_ERR(job)) {
                xe_bb_free(bb, NULL);
@@ -186,7 +186,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
                }
        }
 
-       batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool.bo);
+       batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool->bo);
        job = xe_bb_create_wa_job(e, bb, batch_ofs);
        if (IS_ERR(job)) {
                xe_bb_free(bb, NULL);
@@ -439,26 +439,32 @@ static int all_fw_domain_init(struct xe_gt *gt)
        if (err)
                goto err_force_wake;
 
-       /*
-        * FIXME: This should be ok as SA should only be used by gt->migrate and
-        * vm->gt->migrate and both should be pointing to a non-media GT. But to
-        * realy safe, convert gt->kernel_bb_pool to a pointer and point a media
-        * GT to the kernel_bb_pool on a real tile.
-        */
        if (!xe_gt_is_media_type(gt)) {
-               err = xe_sa_bo_manager_init(gt, &gt->kernel_bb_pool, SZ_1M, 16);
-               if (err)
+               gt->kernel_bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
+               if (IS_ERR(gt->kernel_bb_pool)) {
+                       err = PTR_ERR(gt->kernel_bb_pool);
                        goto err_force_wake;
+               }
 
                /*
                 * USM has its only SA pool to non-block behind user operations
                 */
                if (gt_to_xe(gt)->info.supports_usm) {
-                       err = xe_sa_bo_manager_init(gt, &gt->usm.bb_pool,
-                                                   SZ_1M, 16);
-                       if (err)
+                       gt->usm.bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
+                       if (IS_ERR(gt->usm.bb_pool)) {
+                               err = PTR_ERR(gt->usm.bb_pool);
                                goto err_force_wake;
+                       }
                }
+       } else {
+               struct xe_gt *full_gt = xe_find_full_gt(gt);
+
+               /*
+                * Media GT's kernel_bb_pool is only used while recording the
+                * default context during GT init.  The USM pool should never
+                * be needed on the media GT.
+                */
+               gt->kernel_bb_pool = full_gt->kernel_bb_pool;
        }
 
        if (!xe_gt_is_media_type(gt)) {
index 9fab8017490f2bee789c25ec36ecd1deba8ab008..c45486c2015a9068f75964f6cd4ba3e62735b999 100644 (file)
@@ -66,8 +66,8 @@ static int sa_info(struct seq_file *m, void *data)
        struct xe_gt *gt = node_to_gt(m->private);
        struct drm_printer p = drm_seq_file_printer(m);
 
-       drm_suballoc_dump_debug_info(&gt->kernel_bb_pool.base, &p,
-                                    gt->kernel_bb_pool.gpu_addr);
+       drm_suballoc_dump_debug_info(&gt->kernel_bb_pool->base, &p,
+                                    gt->kernel_bb_pool->gpu_addr);
 
        return 0;
 }
index 9d3117fad2e4b27282c2119184b783e7acb21543..7c47d67aa8be86e701e5ac1b65508a5dcea5780c 100644 (file)
@@ -214,7 +214,7 @@ struct xe_gt {
                 * behind any user operations which may have resulted in a
                 * fault.
                 */
-               struct xe_sa_manager bb_pool;
+               struct xe_sa_manager *bb_pool;
                /**
                 * @reserved_bcs_instance: reserved BCS instance used for USM
                 * operations (e.g. mmigrations, fixing page tables)
@@ -304,8 +304,12 @@ struct xe_gt {
        /** @hw_engines: hardware engines on the GT */
        struct xe_hw_engine hw_engines[XE_NUM_HW_ENGINES];
 
-       /** @kernel_bb_pool: Pool from which batchbuffers are allocated */
-       struct xe_sa_manager kernel_bb_pool;
+       /**
+        * @kernel_bb_pool: Pool from which batchbuffers are allocated.
+        *
+        * Media GT shares a pool with its primary GT.
+        */
+       struct xe_sa_manager *kernel_bb_pool;
 
        /** @migrate: Migration helper for vram blits and clearing */
        struct xe_migrate *migrate;
index 13cfb7ad28500a870647091ed9dd6e97850d487b..2169d687ba3fd3c690a840f8aa71cf1d76baee6d 100644 (file)
@@ -161,7 +161,7 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
        u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
        u32 map_ofs, level, i;
        struct xe_device *xe = gt_to_xe(m->gt);
-       struct xe_bo *bo, *batch = gt->kernel_bb_pool.bo;
+       struct xe_bo *bo, *batch = gt->kernel_bb_pool->bo;
        u64 entry;
        int ret;
 
@@ -229,7 +229,7 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
                m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
 
                if (xe->info.supports_usm) {
-                       batch = gt->usm.bb_pool.bo;
+                       batch = gt->usm.bb_pool->bo;
                        batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE,
                                                &is_vram);
                        m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
index 96c4b0ef24fe8f91a3d2bbd9cff621b16d636eb3..c16f7c14ff5264ecbdeaa1cff0c4eb299577ca02 100644 (file)
@@ -33,13 +33,18 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
        sa_manager->bo = NULL;
 }
 
-int xe_sa_bo_manager_init(struct xe_gt *gt,
-                         struct xe_sa_manager *sa_manager,
-                         u32 size, u32 align)
+struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 align)
 {
        struct xe_device *xe = gt_to_xe(gt);
        u32 managed_size = size - SZ_4K;
        struct xe_bo *bo;
+       int ret;
+
+       struct xe_sa_manager *sa_manager = drmm_kzalloc(&gt_to_xe(gt)->drm,
+                                                       sizeof(*sa_manager),
+                                                       GFP_KERNEL);
+       if (!sa_manager)
+               return ERR_PTR(-ENOMEM);
 
        sa_manager->bo = NULL;
 
@@ -49,7 +54,7 @@ int xe_sa_bo_manager_init(struct xe_gt *gt,
        if (IS_ERR(bo)) {
                drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
                        PTR_ERR(bo));
-               return PTR_ERR(bo);
+               return (struct xe_sa_manager *)bo;
        }
        sa_manager->bo = bo;
 
@@ -61,15 +66,19 @@ int xe_sa_bo_manager_init(struct xe_gt *gt,
                if (!sa_manager->cpu_ptr) {
                        xe_bo_unpin_map_no_vm(sa_manager->bo);
                        sa_manager->bo = NULL;
-                       return -ENOMEM;
+                       return ERR_PTR(-ENOMEM);
                }
        } else {
                sa_manager->cpu_ptr = bo->vmap.vaddr;
                memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
        }
 
-       return drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
-                                       sa_manager);
+       ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
+                                      sa_manager);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return sa_manager;
 }
 
 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
index 742282ef71792f32c6b9c7f917b658b771b33f08..3063fb34c720cab7ed63eaaec615a8b0e596a644 100644 (file)
@@ -11,9 +11,7 @@ struct dma_fence;
 struct xe_bo;
 struct xe_gt;
 
-int xe_sa_bo_manager_init(struct xe_gt *gt,
-                         struct xe_sa_manager *sa_manager,
-                         u32 size, u32 align);
+struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 align);
 
 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
                                  u32 size);