#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
#define OTHER_GUC_INSTANCE 0
+#define OTHER_GSC_HECI2_INSTANCE 3
#define OTHER_GSC_INSTANCE 6
#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4))
#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8)
#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac)
#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0)
+#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
#define GUC_SG_INTR_MASK XE_REG(0x1900e8)
#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec)
#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4)
struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_device *xe = gt_to_xe(gt);
+ u32 actions;
int ret;
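+ /* take a snapshot of the pending actions and clear them under the lock */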
+ spin_lock_irq(&gsc->lock);
+ actions = gsc->work_actions;
+ gsc->work_actions = 0;
+ spin_unlock_irq(&gsc->lock);
+
xe_device_mem_access_get(xe);
xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
- ret = gsc_upload_and_init(gsc);
- if (ret && ret != -EEXIST)
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
- else
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
+ if (actions & GSC_ACTION_FW_LOAD) {
+ ret = gsc_upload_and_init(gsc);
+ if (ret && ret != -EEXIST)
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
+ else
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
+ }
+
+ if (actions & GSC_ACTION_SW_PROXY)
+ xe_gsc_proxy_request_handler(gsc);
xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
xe_device_mem_access_put(xe);
gsc->fw.type = XE_UC_FW_TYPE_GSC;
INIT_WORK(&gsc->work, gsc_work);
+ spin_lock_init(&gsc->lock);
/* The GSC uC is only available on the media GT */
if (tile->media_gt && (gt != tile->media_gt)) {
return;
}
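+ /* flag the firmware load as pending; gsc_work() performs the actual load */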
+ spin_lock_irq(&gsc->lock);
+ gsc->work_actions |= GSC_ACTION_FW_LOAD;
+ spin_unlock_irq(&gsc->lock);
+
queue_work(gsc->wq, &gsc->work);
}
#include "xe_gt_printk.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
/*
* GSC proxy:
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
+static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ /* make sure we never accidentally write the RST bit */
+ clr |= HECI_H_CSR_RST;
+
+ xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
+}
+
+static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
+{
+ /* The status bit is cleared by writing to it */
+ __gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
+}
+
+static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled)
+{
+ u32 set = enabled ? HECI_H_CSR_IE : 0;
+ u32 clr = enabled ? 0 : HECI_H_CSR_IE;
+
+ __gsc_proxy_irq_rmw(gsc, clr, set);
+}
+
static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size)
{
struct xe_gt *gt = gsc_to_gt(gsc);
return ret < 0 ? ret : 0;
}
-static int gsc_proxy_request_handler(struct xe_gsc *gsc)
+int xe_gsc_proxy_request_handler(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
int slept;
xe_gt_err(gt, "GSC proxy component not bound!\n");
err = -EIO;
} else {
+ /*
+ * clear the pending interrupt and allow new proxy requests to
+ * be generated while we handle the current one
+ */
+ gsc_proxy_irq_clear(gsc);
err = proxy_query(gsc);
}
mutex_unlock(&gsc->proxy.mutex);
return err;
}
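+/* called from the GT irq dispatch; defers the proxy request handling to gsc_work() */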
+void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ if (unlikely(!iir))
+ return;
+
+ if (!gsc->proxy.component) {
+ xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n");
+ return;
+ }
+
+ spin_lock(&gsc->lock);
+ gsc->work_actions |= GSC_ACTION_SW_PROXY;
+ spin_unlock(&gsc->lock);
+
+ queue_work(gsc->wq, &gsc->work);
+}
+
static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
struct device *mei_kdev, void *data)
{
{
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_device *xe = gt_to_xe(gt);
+ int err = 0;
- if (gsc->proxy.component_added) {
- component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
- gsc->proxy.component_added = false;
- }
+ if (!gsc->proxy.component_added)
+ return;
+
+ /* disable HECI2 IRQs */
+ xe_pm_runtime_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (err)
+ xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
+
+ /* try to disable the irq even if forcewake failed */
+ gsc_proxy_irq_toggle(gsc, false);
+
+ if (!err)
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_pm_runtime_put(xe);
+
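+ /* flush any proxy work still in flight before the component is removed */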
+ xe_gsc_wait_for_worker_completion(gsc);
+
+ component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
+ gsc->proxy.component_added = false;
}
/**
{
int err;
+ /* enable the proxy interrupt in the GSC shim layer */
+ gsc_proxy_irq_toggle(gsc, true);
+
/*
* The handling of the first proxy request must be manually triggered to
* notify the GSC that we're ready to support the proxy flow.
*/
- err = gsc_proxy_request_handler(gsc);
+ err = xe_gsc_proxy_request_handler(gsc);
if (err)
return err;
void xe_gsc_proxy_remove(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
+int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
+void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir);
+
#endif
#include <linux/iosys-map.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
/** @work: delayed load and proxy handling work */
struct work_struct work;
+ /** @lock: protects access to the work_actions mask */
+ spinlock_t lock;
+
+ /** @work_actions: mask of actions to be performed in the work */
+ u32 work_actions;
+#define GSC_ACTION_FW_LOAD BIT(0)
+#define GSC_ACTION_SW_PROXY BIT(1)
+
/** @proxy: sub-structure containing the SW proxy-related variables */
struct {
/** @component: struct for communication with mei component */
#include "xe_device.h"
#include "xe_display.h"
#include "xe_drv.h"
+#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
u32 ccs_mask, bcs_mask;
u32 irqs, dmask, smask;
u32 gsc_mask = 0;
+ u32 heci_mask = 0;
if (xe_device_uc_enabled(xe)) {
irqs = GT_RENDER_USER_INTERRUPT |
xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);
- if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER))
+ /*
+ * The HECI2 interrupt is enabled via the same register as the
+ * GSCCS interrupts, but it has its own mask register.
+ */
+ if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
gsc_mask = irqs;
- else if (HAS_HECI_GSCFI(xe))
+ heci_mask = GSC_IRQ_INTF(1);
+ } else if (HAS_HECI_GSCFI(xe)) {
gsc_mask = GSC_IRQ_INTF(1);
+ }
+
if (gsc_mask) {
- xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask);
+ xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask);
}
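+ /* the HECI2 interrupt is unmasked in the upper 16 bits; the lower (reserved) half stays masked */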
+ if (heci_mask)
+ xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
}
}
return xe_guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
return xe_guc_irq_handler(&gt->uc.guc, iir);
+ if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
+ return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);
if (instance != OTHER_GUC_INSTANCE &&
instance != OTHER_MEDIA_GUC_INSTANCE) {
if (MEDIA_VER(xe) < 13)
return tile->primary_gt;
- if (class == XE_ENGINE_CLASS_VIDEO_DECODE ||
- class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
+ switch (class) {
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
return tile->media_gt;
-
- if (class == XE_ENGINE_CLASS_OTHER &&
- (instance == OTHER_MEDIA_GUC_INSTANCE || instance == OTHER_GSC_INSTANCE))
- return tile->media_gt;
-
- return tile->primary_gt;
+ case XE_ENGINE_CLASS_OTHER:
+ switch (instance) {
+ case OTHER_MEDIA_GUC_INSTANCE:
+ case OTHER_GSC_INSTANCE:
+ case OTHER_GSC_HECI2_INSTANCE:
+ return tile->media_gt;
+ default:
+ break;
+ }
+ fallthrough;
+ default:
+ return tile->primary_gt;
+ }
}
static void gt_irq_handler(struct xe_tile *tile,
HAS_HECI_GSCFI(tile_to_xe(tile))) {
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
}
xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);