xe_gt_mcr.o \
xe_gt_pagefault.o \
xe_gt_sysfs.o \
+ xe_gt_tlb_invalidation.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_sysfs.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}
+ err = xe_gt_tlb_invalidation_init(gt);
+ if (err)
+ return err;
+
err = xe_gt_pagefault_init(gt);
if (err)
return err;
#include "xe_gt_debugfs.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_hw_engine.h"
#include "xe_macros.h"
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
-#include "xe_gt_pagefault.h"
#include "xe_migrate.h"
#include "xe_pt.h"
#include "xe_trace.h"
return container_of(guc, struct xe_gt, uc.guc);
}
-static int send_tlb_invalidation(struct xe_guc *guc)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION,
- 0,
- XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
- XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
- XE_GUC_TLB_INVAL_FLUSH_CACHE,
- };
- int seqno;
- int ret;
-
- /*
- * XXX: The seqno algorithm relies on TLB invalidation being processed
- * in order which they currently are, if that changes the algorithm will
- * need to be updated.
- */
- mutex_lock(&guc->ct.lock);
- seqno = gt->usm.tlb_invalidation_seqno;
- action[1] = seqno;
- gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
- TLB_INVALIDATION_SEQNO_MAX;
- if (!gt->usm.tlb_invalidation_seqno)
- gt->usm.tlb_invalidation_seqno = 1;
- ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
- G2H_LEN_DW_TLB_INVALIDATE, 1);
- if (!ret)
- ret = seqno;
- mutex_unlock(&guc->ct.lock);
-
- return ret;
-}
-
static bool access_is_atomic(enum access_type access_type)
{
return access_type == ACCESS_TYPE_ATOMIC;
* defer TLB invalidate + fault response to a callback of fence
* too
*/
- ret = send_tlb_invalidation(&gt->uc.guc);
+ ret = xe_gt_tlb_invalidation(gt);
if (ret >= 0)
ret = 0;
}
if (!xe->info.supports_usm)
return 0;
- gt->usm.tlb_invalidation_seqno = 1;
for (i = 0; i < NUM_PF_QUEUE; ++i) {
gt->usm.pf_queue[i].gt = gt;
spin_lock_init(&gt->usm.pf_queue[i].lock);
}
}
-int xe_gt_tlb_invalidation(struct xe_gt *gt)
-{
- return send_tlb_invalidation(&gt->uc.guc);
-}
-
-static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
-{
- if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
- return true;
-
- if (seqno - gt->usm.tlb_invalidation_seqno_recv >
- (TLB_INVALIDATION_SEQNO_MAX / 2))
- return true;
-
- return false;
-}
-
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
-{
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_guc *guc = &gt->uc.guc;
- int ret;
-
- /*
- * XXX: See above, this algorithm only works if seqno are always in
- * order
- */
- ret = wait_event_timeout(guc->ct.wq,
- tlb_invalidation_seqno_past(gt, seqno),
- HZ / 5);
- if (!ret) {
- drm_err(&xe->drm, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
- seqno, gt->usm.tlb_invalidation_seqno_recv);
- return -ETIME;
- }
-
- return 0;
-}
-
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- int expected_seqno;
-
- if (unlikely(len != 1))
- return -EPROTO;
-
- /* Sanity check on seqno */
- expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
- TLB_INVALIDATION_SEQNO_MAX;
- XE_WARN_ON(expected_seqno != msg[0]);
-
- gt->usm.tlb_invalidation_seqno_recv = msg[0];
- smp_wmb();
- wake_up_all(&guc->ct.wq);
-
- return 0;
-}
-
static int granularity_in_byte(int val)
{
switch (val) {
int xe_gt_pagefault_init(struct xe_gt *gt);
void xe_gt_pagefault_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len);
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len);
#endif /* _XE_GT_PAGEFAULT_ */
--- /dev/null
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gt.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+
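+/*
+ * Typical call pattern (illustrative sketch, not verbatim from a caller):
+ * issue an invalidation, then wait on the returned seqno when completion
+ * matters:
+ *
+ *	seqno = xe_gt_tlb_invalidation(gt);
+ *	if (seqno >= 0)
+ *		err = xe_gt_tlb_invalidation_wait(gt, seqno);
+ */
+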
+static struct xe_gt *
+guc_to_gt(struct xe_guc *guc)
+{
+ return container_of(guc, struct xe_gt, uc.guc);
+}
+
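+/*
+ * Seqno 0 is reserved as an "invalid" value: the send side starts counting
+ * at 1 and skips 0 on wraparound.
+ */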
+int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+{
+ gt->usm.tlb_invalidation_seqno = 1;
+
+ return 0;
+}
+
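+/*
+ * Send a full-GT TLB invalidation to the GuC. Returns the seqno assigned to
+ * this invalidation (always > 0) on success, or a negative error code. The
+ * CT lock serializes seqno assignment with the actual send.
+ */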
+static int send_tlb_invalidation(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
+ XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
+ XE_GUC_TLB_INVAL_FLUSH_CACHE,
+ };
+ int seqno;
+ int ret;
+
+ /*
+ * XXX: The seqno algorithm relies on TLB invalidations being processed
+ * in order, which they currently are; if that changes, the algorithm
+ * will need to be updated.
+ */
+ mutex_lock(&guc->ct.lock);
+ seqno = gt->usm.tlb_invalidation_seqno;
+ action[1] = seqno;
+ gt->usm.tlb_invalidation_seqno = (gt->usm.tlb_invalidation_seqno + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ if (!gt->usm.tlb_invalidation_seqno)
+ gt->usm.tlb_invalidation_seqno = 1;
+ ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_TLB_INVALIDATE, 1);
+ if (!ret)
+ ret = seqno;
+ mutex_unlock(&guc->ct.lock);
+
+ return ret;
+}
+
+int xe_gt_tlb_invalidation(struct xe_gt *gt)
+{
+ return send_tlb_invalidation(&gt->uc.guc);
+}
+
+static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
+{
+ if (gt->usm.tlb_invalidation_seqno_recv >= seqno)
+ return true;
+
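+ /*
+ * Seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, so a gap of more than half
+ * the seqno space means the received seqno has wrapped past the one
+ * being waited on, i.e. it has already completed.
+ */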
+ if (seqno - gt->usm.tlb_invalidation_seqno_recv >
+ (TLB_INVALIDATION_SEQNO_MAX / 2))
+ return true;
+
+ return false;
+}
+
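+/*
+ * Block until the given seqno has been acknowledged by the GuC, or return
+ * -ETIME after a 200ms (HZ / 5) timeout.
+ */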
+int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_guc *guc = &gt->uc.guc;
+ int ret;
+
+ /*
+ * XXX: See above; this algorithm only works if seqnos always arrive in
+ * order.
+ */
+ ret = wait_event_timeout(guc->ct.wq,
+ tlb_invalidation_seqno_past(gt, seqno),
+ HZ / 5);
+ if (!ret) {
+ drm_err(&xe->drm, "TLB invalidation timed out, seqno=%d, recv=%d\n",
+ seqno, gt->usm.tlb_invalidation_seqno_recv);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
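+/*
+ * G2H completion handler: record the received seqno and wake any waiters.
+ * The payload is expected to be a single dword containing the seqno.
+ */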
+int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ int expected_seqno;
+
+ if (unlikely(len != 1))
+ return -EPROTO;
+
+ /* Sanity check on seqno */
+ expected_seqno = (gt->usm.tlb_invalidation_seqno_recv + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ XE_WARN_ON(expected_seqno != msg[0]);
+
+ gt->usm.tlb_invalidation_seqno_recv = msg[0];
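+ /* Publish the seqno update before waking waiters */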
+ smp_wmb();
+ wake_up_all(&guc->ct.wq);
+
+ return 0;
+}
--- /dev/null
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_TLB_INVALIDATION_H_
+#define _XE_GT_TLB_INVALIDATION_H_
+
+#include <linux/types.h>
+
+struct xe_gt;
+struct xe_guc;
+
+int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+int xe_gt_tlb_invalidation(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
+int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif /* _XE_GT_TLB_INVALIDATION_H_ */
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_trace.h"
#include "xe_engine.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"