#define XE_EXECLIST_HANG_LIMIT 1
-#define GEN11_SW_CTX_ID_SHIFT 37
-#define GEN11_SW_CTX_ID_WIDTH 11
+#define SW_CTX_ID_SHIFT 37
+#define SW_CTX_ID_WIDTH 11
#define XEHP_SW_CTX_ID_SHIFT 39
#define XEHP_SW_CTX_ID_WIDTH 16
-#define GEN11_SW_CTX_ID \
- GENMASK_ULL(GEN11_SW_CTX_ID_WIDTH + GEN11_SW_CTX_ID_SHIFT - 1, \
- GEN11_SW_CTX_ID_SHIFT)
+#define SW_CTX_ID \
+ GENMASK_ULL(SW_CTX_ID_WIDTH + SW_CTX_ID_SHIFT - 1, \
+ SW_CTX_ID_SHIFT)
#define XEHP_SW_CTX_ID \
 GENMASK_ULL(XEHP_SW_CTX_ID_WIDTH + XEHP_SW_CTX_ID_SHIFT - 1, \
 XEHP_SW_CTX_ID_SHIFT)
xe_gt_assert(hwe->gt, FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id);
} else {
- xe_gt_assert(hwe->gt, FIELD_FIT(GEN11_SW_CTX_ID, ctx_id));
- lrc_desc |= FIELD_PREP(GEN11_SW_CTX_ID, ctx_id);
+ xe_gt_assert(hwe->gt, FIELD_FIT(SW_CTX_ID, ctx_id));
+ lrc_desc |= FIELD_PREP(SW_CTX_ID, ctx_id);
}
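For reference, SW_CTX_ID above expands to GENMASK_ULL(47, 37), so FIELD_PREP() drops the id into descriptor bits 47:37. A minimal sketch (plain C, same linux/bitfield.h helpers as the driver; values illustrative):

	u64 lrc_desc = 0;
	u32 ctx_id = 5;

	/* FIELD_PREP(GENMASK_ULL(47, 37), 5) == (u64)5 << 37 */
	lrc_desc |= FIELD_PREP(SW_CTX_ID, ctx_id);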
if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
struct xe_execlist_exec_queue *exl)
{
struct xe_device *xe = gt_to_xe(port->hwe->gt);
- int max_ctx = FIELD_MAX(GEN11_SW_CTX_ID);
+ int max_ctx = FIELD_MAX(SW_CTX_ID);
if (GRAPHICS_VERx100(xe) >= 1250)
max_ctx = FIELD_MAX(XEHP_SW_CTX_ID);
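For these field widths, FIELD_MAX(SW_CTX_ID) works out to 2^11 - 1 = 2047 and FIELD_MAX(XEHP_SW_CTX_ID) to 2^16 - 1 = 65535, i.e. the pool of software context ids the execlist port can hand out on each platform.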
xe_mmio_write32(gt, GDRST, GRDOM_FULL);
err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
if (err)
- xe_gt_err(gt, "failed to clear GEN11_GRDOM_FULL (%pe)\n",
+ xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
ERR_PTR(err));
return err;
ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
if (ret) {
- drm_err(&xe->drm, "GuC reset timed out, GEN6_GDRST=0x%8x\n",
+ drm_err(&xe->drm, "GuC reset timed out, GDRST=0x%08x\n",
gdrst);
goto err_out;
}
#define MCHBAR_MIRROR_BASE_SNB 0x140000
-#define GEN6_RP_STATE_CAP XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define RP_STATE_CAP XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define RP0_MASK REG_GENMASK(7, 0)
#define RP1_MASK REG_GENMASK(15, 8)
#define RPN_MASK REG_GENMASK(23, 16)
-#define GEN10_FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
+#define FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define RPE_MASK REG_GENMASK(15, 8)
#define GT_PERF_STATUS XE_REG(0x1381b4)
-#define GEN12_CAGF_MASK REG_GENMASK(19, 11)
+#define CAGF_MASK REG_GENMASK(19, 11)
#define GT_FREQUENCY_MULTIPLIER 50
-#define GEN9_FREQ_SCALER 3
+#define GT_FREQUENCY_SCALER 3
/**
* DOC: GuC Power Conservation (PC)
static u32 decode_freq(u32 raw)
{
return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
- GEN9_FREQ_SCALER);
+ GT_FREQUENCY_SCALER);
}
static u32 encode_freq(u32 freq)
{
- return DIV_ROUND_CLOSEST(freq * GEN9_FREQ_SCALER,
+ return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
GT_FREQUENCY_MULTIPLIER);
}
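A quick worked example of the conversion pair above (raw value illustrative):

	u32 mhz = decode_freq(33);	/* 33 * 50 / 3 = 550 MHz */
	u32 raw = encode_freq(mhz);	/* 550 * 3 / 50 = 33, so the pair round-trips */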
if (xe->info.platform == XE_PVC)
reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
else
- reg = xe_mmio_read32(gt, GEN10_FREQ_INFO_REC);
+ reg = xe_mmio_read32(gt, FREQ_INFO_REC);
pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
} else {
freq = xe_mmio_read32(gt, GT_PERF_STATUS);
- freq = REG_FIELD_GET(GEN12_CAGF_MASK, freq);
+ freq = REG_FIELD_GET(CAGF_MASK, freq);
}
ret = sysfs_emit(buf, "%d\n", decode_freq(freq));
if (xe->info.platform == XE_PVC)
reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
else
- reg = xe_mmio_read32(gt, GEN6_RP_STATE_CAP);
+ reg = xe_mmio_read32(gt, RP_STATE_CAP);
pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
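As a hedged illustration of the field unpacking above (register value made up): with reg = 0x00151e2d, RP0 (bits 7:0) = 0x2d -> 45 * 50 = 2250 MHz, RP1 (bits 15:8) = 0x1e -> 30 * 50 = 1500 MHz, and RPN (bits 23:16) = 0x15 -> 21 * 50 = 1050 MHz.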
#include "xe_map.h"
#include "xe_vm.h"
-#define GEN8_CTX_VALID (1 << 0)
-#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
-#define GEN8_CTX_PRIVILEGE (1 << 8)
-#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
-#define INTEL_LEGACY_64B_CONTEXT 3
+#define CTX_VALID (1 << 0)
+#define CTX_PRIVILEGE (1 << 8)
+#define CTX_ADDRESSING_MODE_SHIFT 3
+#define LEGACY_64B_CONTEXT 3
-#define GEN11_ENGINE_CLASS_SHIFT 61
-#define GEN11_ENGINE_INSTANCE_SHIFT 48
+#define ENGINE_CLASS_SHIFT 61
+#define ENGINE_INSTANCE_SHIFT 48
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
(q->usm.acc_notify << ACC_NOTIFY_S) |
q->usm.acc_trigger);
- lrc->desc = GEN8_CTX_VALID;
- lrc->desc |= INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;
+ lrc->desc = CTX_VALID;
+ lrc->desc |= LEGACY_64B_CONTEXT << CTX_ADDRESSING_MODE_SHIFT;
/* TODO: Priority */
/* While this appears to have something about privileged batches or
* some such, it really just means PPGTT mode.
*/
if (vm)
- lrc->desc |= GEN8_CTX_PRIVILEGE;
+ lrc->desc |= CTX_PRIVILEGE;
if (GRAPHICS_VERx100(xe) < 1250) {
- lrc->desc |= (u64)hwe->instance << GEN11_ENGINE_INSTANCE_SHIFT;
- lrc->desc |= (u64)hwe->class << GEN11_ENGINE_CLASS_SHIFT;
+ lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT;
+ lrc->desc |= (u64)hwe->class << ENGINE_CLASS_SHIFT;
}
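Putting the flags above together, a sketch of the pre-1250 descriptor layout as assembled here (field widths beyond the shifts shown in this excerpt are not spelled out):

	/*
	 * bit  0       CTX_VALID
	 * bits 4:3     addressing mode (LEGACY_64B_CONTEXT = 3)
	 * bit  8       CTX_PRIVILEGE (really PPGTT mode, set when a VM is bound)
	 * bits 47:37   SW_CTX_ID, filled in later at submission time
	 * bits 48+     engine instance (ENGINE_INSTANCE_SHIFT)
	 * bits 61+     engine class (ENGINE_CLASS_SHIFT)
	 */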
arb_enable = MI_ARB_ON_OFF | MI_ARB_ENABLE;
int i;
/* gather some relevant info */
- current_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
- bar_size_mask = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
+ current_size = pci_resource_len(pdev, LMEM_BAR);
+ bar_size_mask = pci_rebar_get_possible_sizes(pdev, LMEM_BAR);
if (!bar_size_mask)
return;
pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd & ~PCI_COMMAND_MEMORY);
- _resize_bar(xe, GEN12_LMEM_BAR, rebar_size);
+ _resize_bar(xe, LMEM_BAR, rebar_size);
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- if (!xe_pci_resource_valid(pdev, GEN12_LMEM_BAR)) {
+ if (!xe_pci_resource_valid(pdev, LMEM_BAR)) {
drm_err(&xe->drm, "pci resource is not valid\n");
return -ENXIO;
}
xe_resize_vram_bar(xe);
- xe->mem.vram.io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
- xe->mem.vram.io_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
+ xe->mem.vram.io_start = pci_resource_start(pdev, LMEM_BAR);
+ xe->mem.vram.io_size = pci_resource_len(pdev, LMEM_BAR);
if (!xe->mem.vram.io_size)
return -EIO;
/* actual size */
if (unlikely(xe->info.platform == XE_DG1)) {
- *tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), GEN12_LMEM_BAR);
+ *tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), LMEM_BAR);
*tile_offset = 0;
} else {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
struct drm_file;
struct xe_device;
-#define GEN12_LMEM_BAR 2
+#define LMEM_BAR 2
int xe_mmio_init(struct xe_device *xe);
#define L4_CACHE_POLICY_MASK REG_GENMASK(3, 2)
/* Helper defines */
-#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define XELP_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
#define PVC_NUM_MOCS_ENTRIES 3
#define MTL_NUM_MOCS_ENTRIES 16
#define XE2_NUM_MOCS_ENTRIES 16
info->table = dg2_mocs_desc;
}
info->uc_index = 1;
- info->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ info->n_entries = XELP_NUM_MOCS_ENTRIES;
info->unused_entries_index = 3;
break;
case XE_DG1:
info->size = ARRAY_SIZE(dg1_mocs_desc);
info->table = dg1_mocs_desc;
info->uc_index = 1;
- info->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ info->n_entries = XELP_NUM_MOCS_ENTRIES;
info->unused_entries_index = 5;
break;
case XE_TIGERLAKE:
case XE_ALDERLAKE_N:
info->size = ARRAY_SIZE(gen12_mocs_desc);
info->table = gen12_mocs_desc;
- info->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ info->n_entries = XELP_NUM_MOCS_ENTRIES;
info->uc_index = 3;
info->unused_entries_index = 2;
break;
pte |= xe_walk->default_pte;
/*
- * Set the GEN12_PTE_PS64 hint if possible, otherwise if
+ * Set the XE_PTE_PS64 hint if possible, otherwise if
* this device *requires* 64K PTE size for VRAM, fail.
*/
if (level == 0 && !xe_parent->is_compact) {
stolen_size = tile_size - mgr->stolen_base;
/* Verify usage fits in the actual resource available */
- if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, GEN12_LMEM_BAR))
+ if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
mgr->io_base = tile->mem.vram.io_start + mgr->stolen_base;
/*
#define DGFX_WOPCM_SIZE SZ_4M
/* FIXME: Larger size required for MTL, do a proper probe sooner or later */
#define MTL_WOPCM_SIZE SZ_4M
-#define GEN11_WOPCM_SIZE SZ_2M
+#define WOPCM_SIZE SZ_2M

-#define GEN12_MAX_WOPCM_SIZE SZ_8M
+#define MAX_WOPCM_SIZE SZ_8M
/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
#define WOPCM_RESERVED_SIZE SZ_16K
/* GuC WOPCM Offset value needs to be aligned to 16KB. */
#define GUC_WOPCM_OFFSET_ALIGNMENT (1UL << GUC_WOPCM_OFFSET_SHIFT)
-/* 36KB WOPCM reserved at the end of WOPCM on GEN11. */
-#define GEN11_WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K)
+/* 36KB WOPCM reserved at the end of WOPCM */
+#define WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K)
static inline struct xe_gt *wopcm_to_gt(struct xe_wopcm *wopcm)
{
static u32 context_reserved_size(void)
{
- return GEN11_WOPCM_HW_CTX_RESERVED;
+ return WOPCM_HW_CTX_RESERVED;
}
static bool __check_layout(struct xe_device *xe, u32 wopcm_size,
{
return IS_DGFX(xe) ? DGFX_WOPCM_SIZE :
xe->info.platform == XE_METEORLAKE ? MTL_WOPCM_SIZE :
- GEN11_WOPCM_SIZE;
+ WOPCM_SIZE;
}
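A minimal sketch of the invariant the layout check guards (helper name hypothetical; only the defines are from this file):

	/* GuC region plus the 36K HW context save area must fit in WOPCM */
	static bool guc_fits_wopcm(u32 guc_base, u32 guc_size, u32 wopcm_size)
	{
		return guc_base + guc_size + WOPCM_HW_CTX_RESERVED <= wopcm_size;
	}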
/**
* BIOS/IFWI, check against the max allowed wopcm size to
* validate if the programmed values align to the wopcm layout.
*/
- wopcm->size = GEN12_MAX_WOPCM_SIZE;
+ wopcm->size = MAX_WOPCM_SIZE;
goto check;
}