From 4e03b584143e18eabd091061a1716515da928dcb Mon Sep 17 00:00:00 2001
From: Mauro Carvalho Chehab
Date: Wed, 22 Nov 2023 14:38:23 +0000
Subject: [PATCH] drm/xe/uapi: Reject bo creation of unaligned size
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

For xe bo creation we require the passed size to match the system or
vram minimum page alignment. This ensures userspace is aware of the
region constraints; unaligned allocations are rejected with -EINVAL.

v2:
- Rebase, update uAPI documentation. (Thomas)
v3:
- Adjust the dma-buf kunit test accordingly. (Thomas)
v4:
- Fix rebase conflicts and update the commit message. (Francois)

Signed-off-by: Mauro Carvalho Chehab
Signed-off-by: Zbigniew Kempczyński
Signed-off-by: Thomas Hellström
Reviewed-by: Maarten Lankhorst
Signed-off-by: Francois Dugast
Reviewed-by: José Roberto de Souza
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/xe/tests/xe_dma_buf.c | 10 ++++++++--
 drivers/gpu/drm/xe/xe_bo.c            | 26 +++++++++++++++++---------
 include/uapi/drm/xe_drm.h             | 17 +++++++++--------
 3 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 81f12422a5873..bb6f6424e06f6 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -109,15 +109,21 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 	struct drm_gem_object *import;
 	struct dma_buf *dmabuf;
 	struct xe_bo *bo;
+	size_t size;
 
 	/* No VRAM on this device? */
 	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
 	    (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
 		return;
 
+	size = PAGE_SIZE;
+	if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+		size = SZ_64K;
+
 	kunit_info(test, "running %s\n", __func__);
-	bo = xe_bo_create_user(xe, NULL, NULL, PAGE_SIZE, DRM_XE_GEM_CPU_CACHING_WC,
-			       ttm_bo_type_device, params->mem_mask);
+	bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
+			       ttm_bo_type_device, XE_BO_CREATE_USER_BIT | params->mem_mask);
 	if (IS_ERR(bo)) {
 		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
 			   PTR_ERR(bo));
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index fd516ad7478c8..0bd1b3581945a 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1222,6 +1222,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 	};
 	struct ttm_placement *placement;
 	uint32_t alignment;
+	size_t aligned_size;
 	int err;
 
 	/* Only kernel objects should set GT */
@@ -1232,23 +1233,30 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (!bo) {
-		bo = xe_bo_alloc();
-		if (IS_ERR(bo))
-			return bo;
-	}
-
 	if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
 	    !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
 	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
-		size = ALIGN(size, SZ_64K);
+		aligned_size = ALIGN(size, SZ_64K);
+		if (type != ttm_bo_type_device)
+			size = ALIGN(size, SZ_64K);
 		flags |= XE_BO_INTERNAL_64K;
 		alignment = SZ_64K >> PAGE_SHIFT;
+
 	} else {
-		size = ALIGN(size, PAGE_SIZE);
+		aligned_size = ALIGN(size, SZ_4K);
+		flags &= ~XE_BO_INTERNAL_64K;
 		alignment = SZ_4K >> PAGE_SHIFT;
 	}
 
+	if (type == ttm_bo_type_device && aligned_size != size)
+		return ERR_PTR(-EINVAL);
+
+	if (!bo) {
+		bo = xe_bo_alloc();
+		if (IS_ERR(bo))
+			return bo;
+	}
+
 	bo->tile = tile;
 	bo->size = size;
 	bo->flags = flags;
@@ -1566,7 +1574,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
 					     const void *data, size_t size, u32 flags)
 {
-	struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, size, flags);
+	struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
 
 	if (IS_ERR(bo))
 		return bo;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index dc657ae9db182..d7918f6e760fa 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -207,11 +207,13 @@ struct drm_xe_query_mem_region {
 	 *
 	 * When the kernel allocates memory for this region, the
 	 * underlying pages will be at least @min_page_size in size.
-	 *
-	 * Important note: When userspace allocates a GTT address which
-	 * can point to memory allocated from this region, it must also
-	 * respect this minimum alignment. This is enforced by the
-	 * kernel.
+	 * Buffer objects with an allowable placement in this region must be
+	 * created with a size aligned to this value.
+	 * GPU virtual address mappings of (parts of) buffer objects that
+	 * may be placed in this region must also have their GPU virtual
+	 * address and range aligned to this value.
+	 * Affected IOCTLs will return %-EINVAL if alignment restrictions are
+	 * not met.
 	 */
 	__u32 min_page_size;
 	/**
@@ -517,9 +519,8 @@ struct drm_xe_gem_create {
 	__u64 extensions;
 
 	/**
-	 * @size: Requested size for the object
-	 *
-	 * The (page-aligned) allocated size for the object will be returned.
+	 * @size: Size of the object to be created. Must match the region
+	 * (system or vram) minimum alignment (&min_page_size).
 	 */
 	__u64 size;
 
-- 
2.30.2
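
For illustration, a minimal userspace-side sketch of how a caller might satisfy
the new size constraint by rounding the requested size up to the reported
min_page_size before the GEM create ioctl. Only struct drm_xe_gem_create's
@size and struct drm_xe_query_mem_region's @min_page_size come from this patch;
the helper names, the DRM_IOCTL_XE_GEM_CREATE wrapper and the handle field are
assumed from the surrounding uAPI and may differ at this point in the series.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/*
 * Hypothetical helper: round a requested size up to the region's
 * min_page_size (a power of two, per struct drm_xe_query_mem_region).
 */
static uint64_t xe_bo_size_align(uint64_t size, uint32_t min_page_size)
{
	return (size + min_page_size - 1) & ~((uint64_t)min_page_size - 1);
}

/*
 * Sketch of an aligned GEM create. @min_page_size is assumed to come from a
 * prior memory region query; placement/flags fields are left out and must be
 * filled in as the uAPI requires. Passing an unaligned size now fails with
 * -EINVAL instead of being rounded up by the kernel.
 */
static int xe_gem_create_aligned(int fd, uint64_t size, uint32_t min_page_size,
				 uint32_t *handle)
{
	struct drm_xe_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = xe_bo_size_align(size, min_page_size);

	if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
		return -1;

	*handle = create.handle;
	return 0;
}

The same alignment applies to the GPU virtual address and range of VM binds
targeting such objects, per the updated @min_page_size documentation above.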