drm/xe/buddy: remove the virtualized start
author: Matthew Auld <matthew.auld@intel.com>
Tue, 14 Mar 2023 08:58:40 +0000 (08:58 +0000)
committer: Rodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 19 Dec 2023 23:30:20 +0000 (18:30 -0500)
Hopefully not needed anymore. We can add a .compatible() hook once we
need to differentiate between mappable and non-mappable vram. If the
allocation is not contiguous then the start value is kind of
meaningless, so rather just mark it as invalid.

In upstream, TTM wants to eventually remove the ttm_resource.start
usage.

References: 544432703b2f ("drm/ttm: Add new callbacks to ttm res mgr")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_ttm_vram_mgr.c

index 5e309b26f75c5a2dd5277dd9637605c20de03c4f..3ca28f84dff754464271ab2ff790bfb221995040 100644 (file)
@@ -672,6 +672,12 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
                                void *new_addr = gt->mem.vram.mapping +
                                        (new_mem->start << PAGE_SHIFT);
 
+                               if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
+                                       ret = -EINVAL;
+                                       xe_device_mem_access_put(xe);
+                                       goto out;
+                               }
+
                                XE_BUG_ON(new_mem->start !=
                                          bo->placements->fpfn);
 
index 159ca7105df1052a6aa75869f05aa6e4ff588d23..bafcadaed6b07ce473eeaf05c6f8019224ee83e5 100644 (file)
@@ -54,7 +54,6 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
        struct xe_ttm_vram_mgr_resource *vres;
        u64 size, remaining_size, lpfn, fpfn;
        struct drm_buddy *mm = &mgr->mm;
-       struct drm_buddy_block *block;
        unsigned long pages_per_block;
        int r;
 
@@ -186,24 +185,24 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
                        list_splice_tail(trim_list, &vres->blocks);
        }
 
-       vres->base.start = 0;
-       list_for_each_entry(block, &vres->blocks, link) {
-               unsigned long start;
+       if (!(vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) &&
+           xe_is_vram_mgr_blocks_contiguous(mm, &vres->blocks))
+               vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
-               start = drm_buddy_block_offset(block) +
-                       drm_buddy_block_size(mm, block);
-               start >>= PAGE_SHIFT;
+       /*
+        * For some kernel objects we still rely on the start when io mapping
+        * the object.
+        */
+       if (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) {
+               struct drm_buddy_block *block = list_first_entry(&vres->blocks,
+                                                                typeof(*block),
+                                                                link);
 
-               if (start > PFN_UP(vres->base.size))
-                       start -= PFN_UP(vres->base.size);
-               else
-                       start = 0;
-               vres->base.start = max(vres->base.start, start);
+               vres->base.start = drm_buddy_block_offset(block) >> PAGE_SHIFT;
+       } else {
+               vres->base.start = XE_BO_INVALID_OFFSET;
        }
 
-       if (xe_is_vram_mgr_blocks_contiguous(mm, &vres->blocks))
-               vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
-
        *res = &vres->base;
        return 0;