drm/xe: Allocate GT dynamically
Author: Matt Roper <matthew.d.roper@intel.com>
Thu, 1 Jun 2023 21:52:31 +0000 (14:52 -0700)
Committer: Rodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 19 Dec 2023 23:34:15 +0000 (18:34 -0500)
In preparation for re-adding media GT support, switch the primary GT
within the tile to a dynamic allocation.
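
A condensed sketch of the new allocation path, restating the xe_device_types.h,
xe_gt.c and xe_pci.c hunks below (surrounding code elided):

    /* xe_device_types.h: the tile now holds a pointer rather than an embedded GT */
    struct xe_tile {
            ...
            /** @primary_gt: Primary GT */
            struct xe_gt *primary_gt;
            ...
    };

    /* xe_gt.c: GT storage now comes from a drm-managed (drmm_*) allocation */
    struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
    {
            struct xe_gt *gt;

            gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
            if (!gt)
                    return ERR_PTR(-ENOMEM);

            gt->tile = tile;
            gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);

            return gt;
    }

    /* xe_pci.c: xe_info_init() allocates the primary GT for each tile */
    tile->primary_gt = xe_gt_alloc(tile);
    if (IS_ERR(tile->primary_gt))
            return PTR_ERR(tile->primary_gt);

Because the allocation is drm-managed, it is released automatically when the
drm_device is torn down, so no explicit free path is added.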

Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-19-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
13 files changed:
drivers/gpu/drm/xe/tests/xe_migrate.c
drivers/gpu/drm/xe/tests/xe_rtp_test.c
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device.h
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_ggtt.c
drivers/gpu/drm/xe/xe_gt.c
drivers/gpu/drm/xe/xe_gt.h
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_mmio.c
drivers/gpu/drm/xe/xe_pci.c
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_vm.c

drivers/gpu/drm/xe/tests/xe_migrate.c
index d9f1f31c92d27b936c08413e2279c4d41dd5b736..60266fea7faa210cd9d074f2527dbd7ff1d4d6a0 100644 (file)
@@ -286,7 +286,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
                goto free_pt;
        }
 
-       bb = xe_bb_new(&tile->primary_gt, 32, xe->info.supports_usm);
+       bb = xe_bb_new(tile->primary_gt, 32, xe->info.supports_usm);
        if (IS_ERR(bb)) {
                KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
                           PTR_ERR(bb));
@@ -323,7 +323,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
        xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
        expected = 0;
 
-       emit_clear(&tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
+       emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
                   IS_DGFX(xe));
        run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
                       test);
drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 45f2614f91ec11e451f790a3188a629644138703..b2beba0019cd7e32ef075b5212c2b1825a5e9072 100644 (file)
@@ -237,7 +237,7 @@ static void xe_rtp_process_tests(struct kunit *test)
 {
        const struct rtp_test_case *param = test->param_value;
        struct xe_device *xe = test->priv;
-       struct xe_gt *gt = &xe_device_get_root_tile(xe)->primary_gt;
+       struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
        struct xe_reg_sr *reg_sr = &gt->reg_sr;
        const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
        struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
drivers/gpu/drm/xe/xe_device.c
index 0ff3b94bd66204f0ae4be89624b204d615ffdeae..a4fc5bc54d02d20eb50bc7a987425a9b17739ff6 100644 (file)
@@ -249,10 +249,6 @@ int xe_device_probe(struct xe_device *xe)
                err = xe_tile_alloc(tile);
                if (err)
                        return err;
-
-               err = xe_gt_alloc(xe, &tile->primary_gt);
-               if (err)
-                       return err;
        }
 
        err = xe_mmio_init(xe);
drivers/gpu/drm/xe/xe_device.h
index e88f685f3f215a00eb4d524a65883646b7d70ae1..f2d8479f6ff6206aa7201ebba73fefc43ec97ee0 100644 (file)
@@ -58,7 +58,11 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
        struct xe_gt *gt;
 
        XE_BUG_ON(gt_id > XE_MAX_TILES_PER_DEVICE);
-       gt = &xe->tiles[gt_id].primary_gt;
+
+       gt = xe->tiles[gt_id].primary_gt;
+       if (drm_WARN_ON(&xe->drm, !gt))
+               return NULL;
+
        XE_BUG_ON(gt->info.id != gt_id);
        XE_BUG_ON(gt->info.type == XE_GT_TYPE_UNINITIALIZED);
 
@@ -79,7 +83,7 @@ static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
  */
 static inline struct xe_gt *xe_root_mmio_gt(struct xe_device *xe)
 {
-       return &xe_device_get_root_tile(xe)->primary_gt;
+       return xe_device_get_root_tile(xe)->primary_gt;
 }
 
 static inline bool xe_device_guc_submission_enabled(struct xe_device *xe)
drivers/gpu/drm/xe/xe_device_types.h
index dfcf0a787e01ac2602ba3e9bcd10825bbdb180cb..5cb3fa9e8086af75112ab4207f3af45ce8f324bc 100644 (file)
@@ -74,7 +74,7 @@ struct xe_tile {
        /**
         * @primary_gt: Primary GT
         */
-       struct xe_gt primary_gt;
+       struct xe_gt *primary_gt;
 
        /* TODO: Add media GT here */
 
drivers/gpu/drm/xe/xe_ggtt.c
index d395d6fc1af66bcea11f56ab62f8b8cdf8aac6e9..8d3638826860b92f0d51e066d15e3e63937edaf2 100644 (file)
@@ -196,7 +196,7 @@ void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
         * TODO: Loop over each GT in tile once media GT support is
         * re-added
         */
-       struct xe_gt *gt = &ggtt->tile->primary_gt;
+       struct xe_gt *gt = ggtt->tile->primary_gt;
 
        /* TODO: vfunc for GuC vs. non-GuC */
 
drivers/gpu/drm/xe/xe_gt.c
index aa047db4c937e9f40b65b92c928df77ee89b7a28..f00b82e90106d6a74d8856fce802e0063d955977 100644 (file)
 #include "xe_wa.h"
 #include "xe_wopcm.h"
 
-int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt)
+struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
 {
-       XE_BUG_ON(gt->info.type == XE_GT_TYPE_UNINITIALIZED);
+       struct xe_gt *gt;
 
+       gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
+       if (!gt)
+               return ERR_PTR(-ENOMEM);
+
+       gt->tile = tile;
        gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
 
-       return 0;
+       return gt;
 }
 
 void xe_gt_sanitize(struct xe_gt *gt)
drivers/gpu/drm/xe/xe_gt.h
index 01c1d226faeb59ce90da689fa236c0709b2e4434..21d9044088def0fe449369ffe190f46cfd6b485c 100644 (file)
@@ -16,7 +16,7 @@
             for_each_if (((hwe__) = (gt__)->hw_engines + (id__)) && \
                          xe_hw_engine_is_valid((hwe__)))
 
-int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt);
+struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
 int xe_gt_init_early(struct xe_gt *gt);
 int xe_gt_init(struct xe_gt *gt);
 int xe_gt_record_default_lrcs(struct xe_gt *gt);
drivers/gpu/drm/xe/xe_migrate.c
index 794c5c68589d8c06b7e21800f3e6cfb1ddc9431e..f50484759866d3620fdef5a59b8242d7466c6096 100644 (file)
@@ -229,7 +229,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
 
                if (xe->info.supports_usm) {
-                       batch = tile->primary_gt.usm.bb_pool->bo;
+                       batch = tile->primary_gt->usm.bb_pool->bo;
                        batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE,
                                                &is_vram);
                        m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
@@ -313,7 +313,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 {
        struct xe_device *xe = tile_to_xe(tile);
-       struct xe_gt *primary_gt = &tile->primary_gt;
+       struct xe_gt *primary_gt = tile->primary_gt;
        struct xe_migrate *m;
        struct xe_vm *vm;
        struct ww_acquire_ctx ww;
@@ -546,7 +546,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
                               u64 dst_ofs, bool dst_is_vram, u32 dst_size,
                               u64 ccs_ofs, bool copy_ccs)
 {
-       struct xe_gt *gt = &m->tile->primary_gt;
+       struct xe_gt *gt = m->tile->primary_gt;
        u32 flush_flags = 0;
 
        if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_vram) {
@@ -610,7 +610,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                                  struct ttm_resource *src,
                                  struct ttm_resource *dst)
 {
-       struct xe_gt *gt = &m->tile->primary_gt;
+       struct xe_gt *gt = m->tile->primary_gt;
        struct xe_device *xe = gt_to_xe(gt);
        struct dma_fence *fence = NULL;
        u64 size = src_bo->size;
@@ -873,7 +873,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                                   struct ttm_resource *dst)
 {
        bool clear_vram = mem_type_is_vram(dst->mem_type);
-       struct xe_gt *gt = &m->tile->primary_gt;
+       struct xe_gt *gt = m->tile->primary_gt;
        struct xe_device *xe = gt_to_xe(gt);
        struct dma_fence *fence = NULL;
        u64 size = bo->size;
@@ -1148,7 +1148,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 {
        const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
        struct xe_tile *tile = m->tile;
-       struct xe_gt *gt = &tile->primary_gt;
+       struct xe_gt *gt = tile->primary_gt;
        struct xe_device *xe = tile_to_xe(tile);
        struct xe_sched_job *job;
        struct dma_fence *fence;
drivers/gpu/drm/xe/xe_mmio.c
index 7739282d364d70b435e2ddaecc93072a5b09b7ec..475b14fe435681944fff6646a88828e8a4aab744 100644 (file)
@@ -209,7 +209,7 @@ static int xe_determine_lmem_bar_size(struct xe_device *xe)
 int xe_mmio_tile_vram_size(struct xe_tile *tile, u64 *vram_size, u64 *tile_size, u64 *tile_offset)
 {
        struct xe_device *xe = tile_to_xe(tile);
-       struct xe_gt *gt = &tile->primary_gt;
+       struct xe_gt *gt = tile->primary_gt;
        u64 offset;
        int err;
        u32 reg;
drivers/gpu/drm/xe/xe_pci.c
index 4fbcbfb8a93afa9df124237084d7b5597a2dc4c1..be51c9e97a79630fd5d39ef2802dbaa6322ed808 100644 (file)
@@ -18,6 +18,7 @@
 #include "regs/xe_gt_regs.h"
 #include "xe_device.h"
 #include "xe_drv.h"
+#include "xe_gt.h"
 #include "xe_macros.h"
 #include "xe_module.h"
 #include "xe_pci_types.h"
@@ -529,9 +530,12 @@ static int xe_info_init(struct xe_device *xe,
                tile->xe = xe;
                tile->id = id;
 
-               gt = &tile->primary_gt;
+               tile->primary_gt = xe_gt_alloc(tile);
+               if (IS_ERR(tile->primary_gt))
+                       return PTR_ERR(tile->primary_gt);
+
+               gt = tile->primary_gt;
                gt->info.id = id;       /* FIXME: Determine sensible numbering */
-               gt->tile = tile;
                gt->info.type = XE_GT_TYPE_MAIN;
                gt->info.__engine_mask = graphics_desc->hw_engine_mask;
                if (MEDIA_VER(xe) < 13 && media_desc)
drivers/gpu/drm/xe/xe_pt.c
index 41b2be028b8a4c6d17ebbd7baba675d113df7b3c..bef265715000c874f1f1cbc971fcbbe102840419 100644 (file)
@@ -1314,7 +1314,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
 
                /* TLB invalidation must be done before signaling rebind */
                if (rebind && !xe_vm_no_dma_fences(vma->vm)) {
-                       int err = invalidation_fence_init(&tile->primary_gt, ifence, fence,
+                       int err = invalidation_fence_init(tile->primary_gt, ifence, fence,
                                                          vma);
                        if (err) {
                                dma_fence_put(fence);
@@ -1634,7 +1634,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
                int err;
 
                /* TLB invalidation must be done before signaling unbind */
-               err = invalidation_fence_init(&tile->primary_gt, ifence, fence, vma);
+               err = invalidation_fence_init(tile->primary_gt, ifence, fence, vma);
                if (err) {
                        dma_fence_put(fence);
                        kfree(ifence);
drivers/gpu/drm/xe/xe_vm.c
index 7d4c7a66a35fecbd75717e1b0a4973685fa233f8..44ad4577614156fd158a3c9cac72594af4b86a6e 100644 (file)
@@ -1260,7 +1260,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
        /* Kernel migration VM shouldn't have a circular loop.. */
        if (!(flags & XE_VM_FLAG_MIGRATION)) {
                for_each_tile(tile, xe, id) {
-                       struct xe_gt *gt = &tile->primary_gt;
+                       struct xe_gt *gt = tile->primary_gt;
                        struct xe_vm *migrate_vm;
                        struct xe_engine *eng;
 
@@ -3410,7 +3410,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
                         * FIXME: We potentially need to invalidate multiple
                         * GTs within the tile
                         */
-                       seqno[id] = xe_gt_tlb_invalidation_vma(&tile->primary_gt, NULL, vma);
+                       seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
                        if (seqno[id] < 0)
                                return seqno[id];
                }
@@ -3418,7 +3418,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 
        for_each_tile(tile, xe, id) {
                if (tile_needs_invalidate & BIT(id)) {
-                       ret = xe_gt_tlb_invalidation_wait(&tile->primary_gt, seqno[id]);
+                       ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
                        if (ret < 0)
                                return ret;
                }