static int domain_wake_wait(struct xe_gt *gt,
struct xe_force_wake_domain *domain)
{
return xe_mmio_wait32(gt, domain->reg_ack, domain->val, domain->val,
- XE_FORCE_WAKE_ACK_TIMEOUT_MS);
+ XE_FORCE_WAKE_ACK_TIMEOUT_MS, NULL);
}
static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
static int domain_sleep_wait(struct xe_gt *gt,
struct xe_force_wake_domain *domain)
{
return xe_mmio_wait32(gt, domain->reg_ack, 0, domain->val,
- XE_FORCE_WAKE_ACK_TIMEOUT_MS);
+ XE_FORCE_WAKE_ACK_TIMEOUT_MS, NULL);
}
#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
int err;
xe_mmio_write32(gt, GEN6_GDRST.reg, GEN11_GRDOM_FULL);
- err = xe_mmio_wait32(gt, GEN6_GDRST.reg, 0, GEN11_GRDOM_FULL, 5);
+ err = xe_mmio_wait32(gt, GEN6_GDRST.reg, 0, GEN11_GRDOM_FULL, 5, NULL);
if (err)
drm_err(&xe->drm,
"GT reset failed to clear GEN11_GRDOM_FULL\n");
int xe_guc_reset(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
- u32 guc_status;
+ u32 guc_status, gdrst;
int ret;
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
xe_mmio_write32(gt, GEN6_GDRST.reg, GEN11_GRDOM_GUC);
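+ /* gdrst captures the last GDRST value read, reported below on timeout */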
- ret = xe_mmio_wait32(gt, GEN6_GDRST.reg, 0, GEN11_GRDOM_GUC, 5);
+ ret = xe_mmio_wait32(gt, GEN6_GDRST.reg, 0, GEN11_GRDOM_GUC, 5, &gdrst);
if (ret) {
drm_err(&xe->drm, "GuC reset timed out, GEN6_GDRST=0x%8x\n",
- xe_mmio_read32(gt, GEN6_GDRST.reg));
+ gdrst);
goto err_out;
}
static int guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
u32 len, u32 *response_buf)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
- u32 header;
+ u32 header, reply;
u32 reply_reg = xe_gt_is_media_type(gt) ?
MEDIA_SOFT_SCRATCH(0).reg : GEN11_SOFT_SCRATCH(0).reg;
int ret;
ret = xe_mmio_wait32(gt, reply_reg,
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
GUC_HXG_ORIGIN_GUC),
- GUC_HXG_MSG_0_ORIGIN,
- 50);
+ GUC_HXG_MSG_0_ORIGIN, 50, &reply);
if (ret) {
timeout:
drm_err(&xe->drm, "mmio request 0x%08x: no reply 0x%08x\n",
- request[0], xe_mmio_read32(gt, reply_reg));
+ request[0], reply);
return ret;
}
ret = xe_mmio_wait32(gt, GEN11_HUC_KERNEL_LOAD_INFO.reg,
HUC_LOAD_SUCCESSFUL,
- HUC_LOAD_SUCCESSFUL, 100);
+ HUC_LOAD_SUCCESSFUL, 100, NULL);
if (ret) {
drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
goto fail;
return (reg_val & mask) != eval ? -EINVAL : 0;
}
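+ /*
+ * Poll @reg until (value & @mask) == @val, backing off exponentially,
+ * for up to @timeout_ms. If @out_val is non-NULL, the last value read
+ * is stored there on both success and timeout.
+ */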
-static inline int xe_mmio_wait32(struct xe_gt *gt,
- u32 reg, u32 val,
- u32 mask, u32 timeout_ms)
+static inline int xe_mmio_wait32(struct xe_gt *gt, u32 reg, u32 val,
+ u32 mask, u32 timeout_ms, u32 *out_val)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_ms(cur, timeout_ms);
+ int ret = -ETIMEDOUT;
s64 wait = 10;
+ u32 read;
for (;;) {
- if ((xe_mmio_read32(gt, reg) & mask) == val)
- return 0;
+ read = xe_mmio_read32(gt, reg);
+ if ((read & mask) == val) {
+ ret = 0;
+ break;
+ }
+
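+ /* stop polling once the deadline has passed; ret stays -ETIMEDOUT */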
cur = ktime_get_raw();
if (!ktime_before(cur, end))
- return -ETIMEDOUT;
+ break;
if (ktime_after(ktime_add_us(cur, wait), end))
wait = ktime_us_delta(end, cur);
udelay(wait);
wait <<= 1;
}
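+ /* report the last value read, whether we matched or timed out */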
+ if (out_val)
+ *out_val = read;
+
+ return ret;
}
int xe_mmio_ioctl(struct drm_device *dev, void *data,
static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
- u32 src_offset;
+ u32 src_offset, dma_ctrl;
int ret;
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
xe_mmio_write32(gt, DMA_CTRL.reg,
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
- ret = xe_mmio_wait32(gt, DMA_CTRL.reg, 0, START_DMA, 100);
+ ret = xe_mmio_wait32(gt, DMA_CTRL.reg, 0, START_DMA, 100, &dma_ctrl);
if (ret)
drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
- xe_uc_fw_type_repr(uc_fw->type),
- xe_mmio_read32(gt, DMA_CTRL.reg));
+ xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
/* Disable the bits once DMA is over */
xe_mmio_write32(gt, DMA_CTRL.reg, _MASKED_BIT_DISABLE(dma_flags));