From 1b1d3710380d5f0517dcaabe1b96b6401f68ec37 Mon Sep 17 00:00:00 2001
From: Niranjana Vishwanathapura
Date: Tue, 16 May 2023 03:26:53 +0000
Subject: [PATCH] drm/xe: Apply upper limit to sg element size

The iommu_dma_map_sg() function ensures iova allocation doesn't
cross dma segment boundary. It does so by padding some sg elements.
This can cause overflow, ending up with sg->length being set to 0.
Avoid this by halving the maximum segment size (rounded down to
PAGE_SIZE).

Specify maximum segment size for sg elements by using
sg_alloc_table_from_pages_segment() to allocate sg_table.

v2: Use correct max segment size in dma_set_max_seg_size() call

Signed-off-by: Niranjana Vishwanathapura
Reviewed-by: Bruce Chang
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/xe/xe_bo.c   |  8 +++++---
 drivers/gpu/drm/xe/xe_bo.h   | 24 ++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_mmio.c |  7 ++-----
 drivers/gpu/drm/xe/xe_vm.c   |  8 +++++---
 4 files changed, 36 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 4693372ec82e9..7a5118bf4dc02 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -251,9 +251,11 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
 	if (xe_tt->sg)
 		return 0;
 
-	ret = sg_alloc_table_from_pages(&xe_tt->sgt, tt->pages, num_pages,
-					0, (u64)num_pages << PAGE_SHIFT,
-					GFP_KERNEL);
+	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
+						num_pages, 0,
+						(u64)num_pages << PAGE_SHIFT,
+						xe_sg_segment_size(xe_tt->dev),
+						GFP_KERNEL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 7e111332c35aa..2d08622f58a7d 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -296,6 +296,30 @@ void xe_bo_put_commit(struct llist_head *deferred);
 
 struct sg_table *xe_bo_get_sg(struct xe_bo *bo);
 
+/*
+ * xe_sg_segment_size() - Provides upper limit for sg segment size.
+ * @dev: device pointer
+ *
+ * Returns the maximum segment size for the 'struct scatterlist'
+ * elements.
+ */
+static inline unsigned int xe_sg_segment_size(struct device *dev)
+{
+	struct scatterlist __maybe_unused sg;
+	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;
+
+	max = min_t(size_t, max, dma_max_mapping_size(dev));
+
+	/*
+	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
+	 * cross dma segment boundary. It does so by padding some sg elements.
+	 * This can cause overflow, ending up with sg->length being set to 0.
+	 * Avoid this by ensuring maximum segment size is half of 'max'
+	 * rounded down to PAGE_SIZE.
+	 */
+	return round_down(max / 2, PAGE_SIZE);
+}
+
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
 /**
  * xe_bo_is_mem_type - Whether the bo currently resides in the given
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index c7fbb1cc1f64a..4c270a07136ed 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -11,6 +11,7 @@
 #include "regs/xe_engine_regs.h"
 #include "regs/xe_gt_regs.h"
 #include "regs/xe_regs.h"
+#include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_gt_mcr.h"
@@ -26,11 +27,7 @@ static int xe_set_dma_info(struct xe_device *xe)
 	unsigned int mask_size = xe->info.dma_mask_size;
 	int err;
 
-	/*
-	 * We don't have a max segment size, so set it to the max so sg's
-	 * debugging layer doesn't complain
-	 */
-	dma_set_max_seg_size(xe->drm.dev, UINT_MAX);
+	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));
 
 	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
 	if (err)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 40295beea3a2e..25a61735aac81 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -117,9 +117,11 @@ mm_closed:
 	if (ret)
 		goto out;
 
-	ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
-					0, (u64)pinned << PAGE_SHIFT,
-					GFP_KERNEL);
+	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+						pinned, 0,
+						(u64)pinned << PAGE_SHIFT,
+						xe_sg_segment_size(xe->drm.dev),
+						GFP_KERNEL);
 	if (ret) {
 		vma->userptr.sg = NULL;
 		goto out;
-- 
2.30.2
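
Not part of the patch: the effect of the new cap can be illustrated with a minimal
user-space sketch. It assumes a 4 KiB page and ignores dma_max_mapping_size(), which
the real xe_sg_segment_size() also honours; it only shows the round_down(max / 2,
PAGE_SIZE) step that keeps a segment padded by iommu_dma_map_sg() from wrapping the
32-bit sg->length field to 0.

/* Minimal user-space sketch (not kernel code); assumes 4 KiB pages and
 * ignores the dma_max_mapping_size() clamp applied by the real helper. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* round_down() as in include/linux/math.h: align x down to power-of-two a */
#define round_down(x, a) ((x) & ~((a) - 1))

static unsigned int sg_segment_size_sketch(void)
{
	/* sg->length is a 32-bit field, so the hard ceiling is U32_MAX */
	uint64_t max = (1ULL << 32) - 1;

	/*
	 * Halve it so that padding a segment up to the dma segment
	 * boundary cannot overflow the 32-bit length.
	 */
	return round_down(max / 2, PAGE_SIZE);
}

int main(void)
{
	printf("segment size cap: %u bytes\n", sg_segment_size_sketch());
	return 0;
}

With these assumptions the cap works out to 0x7FFFF000 bytes, i.e. half of U32_MAX
rounded down to a page boundary, which matches the value the helper feeds to both
dma_set_max_seg_size() and sg_alloc_table_from_pages_segment().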