drm/xe: Make TLB invalidation fences unordered
author: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Wed, 27 Mar 2024 09:11:35 +0000 (10:11 +0100)
committer: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Thu, 28 Mar 2024 07:39:29 +0000 (08:39 +0100)
They can actually complete out-of-order, so allocate a unique
fence context for each fence.

Fixes: 5387e865d90e ("drm/xe: Add TLB invalidation fence after rebinds issued from execs")
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org> # v6.8+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240327091136.3271-4-thomas.hellstrom@linux.intel.com
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
drivers/gpu/drm/xe/xe_gt_types.h
drivers/gpu/drm/xe/xe_pt.c

index 25b4111097bcd19e28a5b0154efb0a1e82f0dd5f..93df2d7969b33a9802d104c107f6d06f22b27ea3 100644 (file)
@@ -63,7 +63,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
        INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
        spin_lock_init(&gt->tlb_invalidation.pending_lock);
        spin_lock_init(&gt->tlb_invalidation.lock);
-       gt->tlb_invalidation.fence_context = dma_fence_context_alloc(1);
        INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
                          xe_gt_tlb_fence_timeout);
 
index f6da2ad9719fb67f3644ca17ad140a3631e25b6d..2143dffcaf115c0c33bd37e95ad45570b3ada13e 100644 (file)
@@ -179,13 +179,6 @@ struct xe_gt {
                 * xe_gt_tlb_fence_timeout after the timeut interval is over.
                 */
                struct delayed_work fence_tdr;
-               /** @tlb_invalidation.fence_context: context for TLB invalidation fences */
-               u64 fence_context;
-               /**
-                * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by
-                * tlb_invalidation.lock
-                */
-               u32 fence_seqno;
                /** @tlb_invalidation.lock: protects TLB invalidation fences */
                spinlock_t lock;
        } tlb_invalidation;
index 632c1919471d45c32766f08698265322a05d023f..d1b999dbc906daf9a858e27aea0e356662143f35 100644 (file)
@@ -1135,8 +1135,7 @@ static int invalidation_fence_init(struct xe_gt *gt,
        spin_lock_irq(&gt->tlb_invalidation.lock);
        dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
                       &gt->tlb_invalidation.lock,
-                      gt->tlb_invalidation.fence_context,
-                      ++gt->tlb_invalidation.fence_seqno);
+                      dma_fence_context_alloc(1), 1);
        spin_unlock_irq(&gt->tlb_invalidation.lock);
 
        INIT_LIST_HEAD(&ifence->base.link);