drm/xe: Break out TLB invalidation into its own file
authorMatthew Brost <matthew.brost@intel.com>
Wed, 18 Jan 2023 04:31:24 +0000 (20:31 -0800)
committerRodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 19 Dec 2023 23:27:45 +0000 (18:27 -0500)
TLB invalidation is used by more than USM (page faults) so break this
code out into its own file.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/Makefile
drivers/gpu/drm/xe/xe_gt.c
drivers/gpu/drm/xe/xe_gt_debugfs.c
drivers/gpu/drm/xe/xe_gt_pagefault.c
drivers/gpu/drm/xe/xe_gt_pagefault.h
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c [new file with mode: 0644]
drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h [new file with mode: 0644]
drivers/gpu/drm/xe/xe_guc_ct.c
drivers/gpu/drm/xe/xe_vm.c

index f8da32b550bcdfe46f4669b142b1049565e00fd5..998f7044b04707a7449f5a7a69acbd7300c4091c 100644 (file)
@@ -57,6 +57,7 @@ xe-y += xe_bb.o \
        xe_gt_mcr.o \
        xe_gt_pagefault.o \
        xe_gt_sysfs.o \
+       xe_gt_tlb_invalidation.o \
        xe_gt_topology.o \
        xe_guc.o \
        xe_guc_ads.o \
index 61a6430cb435a200d85b91e5d2957d4e77671839..96136f130eda77d5b24142daafb246b4ac44313a 100644 (file)
@@ -19,6 +19,7 @@
 #include "xe_gt_mcr.h"
 #include "xe_gt_pagefault.h"
 #include "xe_gt_sysfs.h"
+#include "xe_gt_tlb_invalidation.h"
 #include "xe_gt_topology.h"
 #include "xe_hw_fence.h"
 #include "xe_irq.h"
@@ -571,6 +572,10 @@ int xe_gt_init(struct xe_gt *gt)
                xe_hw_fence_irq_init(&gt->fence_irq[i]);
        }
 
+       err = xe_gt_tlb_invalidation_init(gt);
+       if (err)
+               return err;
+
        err = xe_gt_pagefault_init(gt);
        if (err)
                return err;
index cd188878414102cfabe88cd01cc750a28a5d3344..01303bbe073c24c5cc2c5cbab7ea2dfd1d4df9ca 100644 (file)
@@ -12,6 +12,7 @@
 #include "xe_gt_debugfs.h"
 #include "xe_gt_mcr.h"
 #include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
 #include "xe_gt_topology.h"
 #include "xe_hw_engine.h"
 #include "xe_macros.h"
index 7125113b7390f53d44aa005fc8841cebd4c993ba..93a8efe5d0a026b53b3cf163606c49b19fa29ba4 100644 (file)
 
 #include "xe_bo.h"
 #include "xe_gt.h"
+#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
-#include "xe_gt_pagefault.h"
 #include "xe_migrate.h"
 #include "xe_pt.h"
 #include "xe_trace.h"
@@ -61,40 +62,6 @@ guc_to_gt(struct xe_guc *guc)
        return container_of(guc, struct xe_gt, uc.guc);
 }
 
-static int send_tlb_invalidation(struct xe_guc *guc)
-{
-       struct xe_gt *gt = guc_to_gt(guc);
-       u32 action[] = {
-               XE_GUC_ACTION_TLB_INVALIDATION,
-               0,
-               XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
-               XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
-               XE_GUC_TLB_INVAL_FLUSH_CACHE,
-       };
-       int seqno;
-       int ret;
-
-       /*
-        * XXX: The seqno algorithm relies on TLB invalidation being processed
-        * in order which they currently are, if that changes the algorithm will
-        * need to be updated.
-        */
-       mutex_lock(&guc->ct.lock);
-       seqno = gt->usm.tlb_invalidation_seqno;
-       action[1] = seqno;
-       gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
-               TLB_INVALIDATION_SEQNO_MAX;
-       if (!gt->usm.tlb_invalidation_seqno)
-               gt->usm.tlb_invalidation_seqno = 1;
-       ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
-                                   G2H_LEN_DW_TLB_INVALIDATE, 1);
-       if (!ret)
-               ret = seqno;
-       mutex_unlock(&guc->ct.lock);
-
-       return ret;
-}
-
 static bool access_is_atomic(enum access_type access_type)
 {
        return access_type == ACCESS_TYPE_ATOMIC;
@@ -278,7 +245,7 @@ unlock_vm:
                 * defer TLB invalidate + fault response to a callback of fence
                 * too
                 */
-               ret = send_tlb_invalidation(&gt->uc.guc);
+               ret = xe_gt_tlb_invalidation(gt);
                if (ret >= 0)
                        ret = 0;
        }
@@ -433,7 +400,6 @@ int xe_gt_pagefault_init(struct xe_gt *gt)
        if (!xe->info.supports_usm)
                return 0;
 
-       gt->usm.tlb_invalidation_seqno = 1;
        for (i = 0; i < NUM_PF_QUEUE; ++i) {
                gt->usm.pf_queue[i].gt = gt;
                spin_lock_init(&gt->usm.pf_queue[i].lock);
@@ -482,65 +448,6 @@ void xe_gt_pagefault_reset(struct xe_gt *gt)
        }
 }
 
-int xe_gt_tlb_invalidation(struct xe_gt *gt)
-{
-       return send_tlb_invalidation(&gt->uc.guc);
-}
-
-static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
-{
-       if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
-               return true;
-
-       if (seqno - gt->usm.tlb_invalidation_seqno_recv >
-           (TLB_INVALIDATION_SEQNO_MAX / 2))
-               return true;
-
-       return false;
-}
-
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
-{
-       struct xe_device *xe = gt_to_xe(gt);
-       struct xe_guc *guc = &gt->uc.guc;
-       int ret;
-
-       /*
-        * XXX: See above, this algorithm only works if seqno are always in
-        * order
-        */
-       ret = wait_event_timeout(guc->ct.wq,
-                                tlb_invalidation_seqno_past(gt, seqno),
-                                HZ / 5);
-       if (!ret) {
-               drm_err(&xe->drm, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
-                       seqno, gt->usm.tlb_invalidation_seqno_recv);
-               return -ETIME;
-       }
-
-       return 0;
-}
-
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
-{
-       struct xe_gt *gt = guc_to_gt(guc);
-       int expected_seqno;
-
-       if (unlikely(len != 1))
-               return -EPROTO;
-
-       /* Sanity check on seqno */
-       expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
-               TLB_INVALIDATION_SEQNO_MAX;
-       XE_WARN_ON(expected_seqno != msg[0]);
-
-       gt->usm.tlb_invalidation_seqno_recv = msg[0];
-       smp_wmb();
-       wake_up_all(&guc->ct.wq);
-
-       return 0;
-}
-
 static int granularity_in_byte(int val)
 {
        switch (val) {
index 35f68027cc9ce956a8c5d7d5beb4631eb9c99f28..839c065a5e4c3c092cc24a575c87f3f829a7f889 100644 (file)
@@ -13,10 +13,7 @@ struct xe_guc;
 
 int xe_gt_pagefault_init(struct xe_gt *gt);
 void xe_gt_pagefault_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
 int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len);
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
 int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len);
 
 #endif /* _XE_GT_PAGEFAULT_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
new file mode 100644 (file)
index 0000000..fea7a55
--- /dev/null
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gt.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+
+static struct xe_gt *
+guc_to_gt(struct xe_guc *guc)
+{
+       return container_of(guc, struct xe_gt, uc.guc);
+}
+
+int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+{
+       gt->usm.tlb_invalidation_seqno = 1;
+
+       return 0;
+}
+
+static int send_tlb_invalidation(struct xe_guc *guc)
+{
+       struct xe_gt *gt = guc_to_gt(guc);
+       u32 action[] = {
+               XE_GUC_ACTION_TLB_INVALIDATION,
+               0,
+               XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
+               XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
+               XE_GUC_TLB_INVAL_FLUSH_CACHE,
+       };
+       int seqno;
+       int ret;
+
+       /*
+        * XXX: The seqno algorithm relies on TLB invalidation being processed
+        * in order which they currently are, if that changes the algorithm will
+        * need to be updated.
+        */
+       mutex_lock(&guc->ct.lock);
+       seqno = gt->usm.tlb_invalidation_seqno;
+       action[1] = seqno;
+       gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
+               TLB_INVALIDATION_SEQNO_MAX;
+       if (!gt->usm.tlb_invalidation_seqno)
+               gt->usm.tlb_invalidation_seqno = 1;
+       ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
+                                   G2H_LEN_DW_TLB_INVALIDATE, 1);
+       if (!ret)
+               ret = seqno;
+       mutex_unlock(&guc->ct.lock);
+
+       return ret;
+}
+
+int xe_gt_tlb_invalidation(struct xe_gt *gt)
+{
+       return send_tlb_invalidation(&gt->uc.guc);
+}
+
+static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
+{
+       if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
+               return true;
+
+       if (seqno - gt->usm.tlb_invalidation_seqno_recv >
+           (TLB_INVALIDATION_SEQNO_MAX / 2))
+               return true;
+
+       return false;
+}
+
+int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
+{
+       struct xe_device *xe = gt_to_xe(gt);
+       struct xe_guc *guc = &gt->uc.guc;
+       int ret;
+
+       /*
+        * XXX: See above, this algorithm only works if seqno are always in
+        * order
+        */
+       ret = wait_event_timeout(guc->ct.wq,
+                                tlb_invalidation_seqno_past(gt, seqno),
+                                HZ / 5);
+       if (!ret) {
+               drm_err(&xe->drm, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
+                       seqno, gt->usm.tlb_invalidation_seqno_recv);
+               return -ETIME;
+       }
+
+       return 0;
+}
+
+int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+       struct xe_gt *gt = guc_to_gt(guc);
+       int expected_seqno;
+
+       if (unlikely(len != 1))
+               return -EPROTO;
+
+       /* Sanity check on seqno */
+       expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
+               TLB_INVALIDATION_SEQNO_MAX;
+       XE_WARN_ON(expected_seqno != msg[0]);
+
+       gt->usm.tlb_invalidation_seqno_recv = msg[0];
+       smp_wmb();
+       wake_up_all(&guc->ct.wq);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
new file mode 100644 (file)
index 0000000..f1c3b34
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_TLB_INVALIDATION_H_
+#define _XE_GT_TLB_INVALIDATION_H_
+
+#include <linux/types.h>
+
+struct xe_gt;
+struct xe_guc;
+
+int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+int xe_gt_tlb_invalidation(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
+int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif /* _XE_GT_TLB_INVALIDATION_H_ */
index 6e25c1d5d43eadd1f0d27759223072c242f103c1..84d4302d4e721f7beacd19f94a55dcf98d744a02 100644 (file)
@@ -15,6 +15,7 @@
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
 #include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
 #include "xe_guc_submit.h"
 #include "xe_map.h"
 #include "xe_trace.h"
index d47a8617c5b6771dfb2121b18059df3848a47134..c548cd04f9cf618f7903a5c2fe7f302d0b491116 100644 (file)
@@ -19,6 +19,7 @@
 #include "xe_engine.h"
 #include "xe_gt.h"
 #include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
 #include "xe_migrate.h"
 #include "xe_pm.h"
 #include "xe_preempt_fence.h"