struct xe_bb *bb, u32 second_idx, const char *str,
struct kunit *test)
{
- u64 batch_base = xe_migrate_batch_base(m, xe->info.supports_usm);
+ u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm);
struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
batch_base,
second_idx);
goto free_pt;
}
- bb = xe_bb_new(tile->primary_gt, 32, xe->info.supports_usm);
+ bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm);
if (IS_ERR(bb)) {
KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
PTR_ERR(bb));
drm_printf(&p, "tile_count %d\n", xe->info.tile_count);
drm_printf(&p, "vm_max_level %d\n", xe->info.vm_max_level);
drm_printf(&p, "force_execlist %s\n", str_yes_no(xe->info.force_execlist));
- drm_printf(&p, "supports_usm %s\n", str_yes_no(xe->info.supports_usm));
drm_printf(&p, "has_flat_ccs %s\n", str_yes_no(xe->info.has_flat_ccs));
+ drm_printf(&p, "has_usm %s\n", str_yes_no(xe->info.has_usm));
for_each_gt(gt, xe, id) {
drm_printf(&p, "gt%d force wake %d\n", id,
xe_force_wake_ref(gt_to_fw(gt), XE_FW_GT));
/** @is_dgfx: is discrete device */
u8 is_dgfx:1;
- /** @supports_usm: Supports unified shared memory */
- u8 supports_usm:1;
/** @has_asid: Has address space ID */
u8 has_asid:1;
/** @force_execlist: Forced execlist submission */
u8 force_execlist:1;
/** @has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
/** @has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
+ /** @has_mmio_ext: Device has extra MMIO address range */
+ u8 has_mmio_ext:1;
/** @has_range_tlb_invalidation: Has range based TLB invalidations */
u8 has_range_tlb_invalidation:1;
/** @has_sriov: Supports SR-IOV */
u8 has_sriov:1;
+ /** @has_usm: Device has unified shared memory support */
+ u8 has_usm:1;
/** @enable_display: display enabled */
u8 enable_display:1;
/** @skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
/** @skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
- /** @supports_mmio_ext: supports MMIO extension/s */
- u8 supports_mmio_ext:1;
/** @has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
/** @skip_guc_pc: Skip GuC based PM feature init */
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
+ if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
q->usm.acc_trigger = value;
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
+ if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
q->usm.acc_notify = value;
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
+ if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
if (value > DRM_XE_ACC_GRANULARITY_64M)
/*
* USM has its own SA pool so as not to block behind user operations
*/
- if (gt_to_xe(gt)->info.supports_usm) {
+ if (gt_to_xe(gt)->info.has_usm) {
gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
if (IS_ERR(gt->usm.bb_pool)) {
err = PTR_ERR(gt->usm.bb_pool);
{
struct xe_device *xe = gt_to_xe(gt);
- return xe->info.supports_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
+ return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
hwe->instance == gt->usm.reserved_bcs_instance;
}
struct xe_device *xe = gt_to_xe(gt);
int i;
- if (!xe->info.supports_usm)
+ if (!xe->info.has_usm)
return 0;
for (i = 0; i < NUM_PF_QUEUE; ++i) {
struct xe_device *xe = gt_to_xe(gt);
int i;
- if (!xe->info.supports_usm)
+ if (!xe->info.has_usm)
return;
for (i = 0; i < NUM_PF_QUEUE; ++i) {
{
struct xe_device *xe = ads_to_xe(ads);
- if (!xe->info.supports_usm)
+ if (!xe->info.has_usm)
return 0;
return GUC_UM_QUEUE_SIZE * GUC_UM_HW_QUEUE_MAX;
guc_capture_list_init(ads);
guc_doorbell_init(ads);
- if (xe->info.supports_usm) {
+ if (xe->info.has_usm) {
guc_um_init_params(ads);
ads_blob_write(ads, ads.um_init_data, base +
offsetof(struct __guc_ads_blob, um_init_params));
xe_hw_engine_enable_ring(hwe);
/* We reserve the highest BCS instance for USM */
- if (xe->info.supports_usm && hwe->class == XE_ENGINE_CLASS_COPY)
+ if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
gt->usm.reserved_bcs_instance = hwe->instance;
err = drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
hwe->logical_instance >= gt->ccs_mode)
return true;
- return xe->info.supports_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
+ return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
hwe->instance == gt->usm.reserved_bcs_instance;
}
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
(q->usm.acc_granularity <<
ACC_GRANULARITY_S) | vm->usm.asid);
- if (xe->info.supports_usm && vm)
+ if (xe->info.has_usm && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
(q->usm.acc_notify << ACC_NOTIFY_S) |
q->usm.acc_trigger);
if (!IS_DGFX(xe)) {
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
- if (xe->info.supports_usm) {
+ if (xe->info.has_usm) {
batch = tile->primary_gt->usm.bb_pool->bo;
m->usm_batch_base_ofs = m->batch_base_ofs;
}
m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
- if (xe->info.supports_usm) {
+ if (xe->info.has_usm) {
batch = tile->primary_gt->usm.bb_pool->bo;
batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
return ERR_PTR(err);
}
- if (xe->info.supports_usm) {
+ if (xe->info.has_usm) {
struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
XE_ENGINE_CLASS_COPY,
primary_gt->usm.reserved_bcs_instance,
xe_vm_close_and_put(vm);
return ERR_CAST(m->q);
}
- if (xe->info.supports_usm)
+ if (xe->info.has_usm)
m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
mutex_init(&m->job_mutex);
u32 update_idx;
u64 ccs_ofs, ccs_size;
u32 ccs_pt;
- bool usm = xe->info.supports_usm;
+ bool usm = xe->info.has_usm;
src_L0 = xe_migrate_res_sizes(&src_it);
dst_L0 = xe_migrate_res_sizes(&dst_it);
struct xe_sched_job *job;
struct xe_bb *bb;
u32 batch_size, update_idx;
- bool usm = xe->info.supports_usm;
+ bool usm = xe->info.has_usm;
clear_L0 = xe_migrate_res_sizes(&src_it);
drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
u64 addr;
int err = 0;
- bool usm = !q && xe->info.supports_usm;
+ bool usm = !q && xe->info.has_usm;
bool first_munmap_rebind = vma &&
vma->gpuva.flags & XE_VMA_FIRST_REBIND;
struct xe_exec_queue *q_override = !q ? m->q : q;
*/
xe_tile_assert(tile, batch_size < SZ_128K);
- bb = xe_bb_new(gt, batch_size, !q && xe->info.supports_usm);
+ bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
if (IS_ERR(bb))
return ERR_CAST(bb);
}
add_mmio_ext:
- /* By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile).
+ /*
+ * By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile).
* When supported, there could be an additional contiguous multi-tile MMIO extension
* space on top of it, hence the need for distinct MMIO spaces.
*/
- if (xe->info.supports_mmio_ext) {
+ if (xe->info.has_mmio_ext) {
regs = xe->mmio.regs + tile_mmio_size * tile_count;
for_each_tile(tile, xe, id) {
u8 require_force_probe:1;
u8 is_dgfx:1;
+
u8 has_display:1;
u8 has_heci_gscfi:1;
-
u8 has_llc:1;
+ u8 has_mmio_ext:1;
u8 has_sriov:1;
+ u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
u8 skip_pcode:1;
- u8 supports_mmio_ext:1;
- u8 skip_guc_pc:1;
};
__diag_push();
.has_asid = 1,
.has_flat_ccs = 0,
- .supports_usm = 1,
+ .has_usm = 1,
};
static const struct xe_graphics_desc graphics_xelpg = {
.has_asid = 1, \
.has_flat_ccs = 0 /* FIXME: implementation missing */, \
.has_range_tlb_invalidation = 1, \
- .supports_usm = 0 /* FIXME: implementation missing */, \
+ .has_usm = 0 /* FIXME: implementation missing */, \
.va_bits = 48, \
.vm_max_level = 4, \
.hw_engine_mask = \
DGFX_FEATURES,
PLATFORM(XE_DG1),
.has_display = true,
- .require_force_probe = true,
.has_heci_gscfi = 1,
+ .require_force_probe = true,
};
static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
DGFX_FEATURES,
PLATFORM(XE_PVC),
.has_display = false,
- .require_force_probe = true,
.has_heci_gscfi = 1,
+ .require_force_probe = true,
};
static const struct xe_device_desc mtl_desc = {
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_llc = desc->has_llc;
+ xe->info.has_mmio_ext = desc->has_mmio_ext;
xe->info.has_sriov = desc->has_sriov;
+ xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
- xe->info.supports_mmio_ext = desc->supports_mmio_ext;
- xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.enable_display &&
xe->info.vram_flags = graphics_desc->vram_flags;
xe->info.va_bits = graphics_desc->va_bits;
xe->info.vm_max_level = graphics_desc->vm_max_level;
- xe->info.supports_usm = graphics_desc->supports_usm;
xe->info.has_asid = graphics_desc->has_asid;
xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
+ xe->info.has_usm = graphics_desc->has_usm;
/*
* All platforms have at least one primary GT. Any platform with media
u8 has_asid:1;
u8 has_flat_ccs:1;
u8 has_range_tlb_invalidation:1;
- u8 supports_usm:1;
+ u8 has_usm:1;
};
struct xe_media_desc {
args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
- !xe->info.supports_usm))
+ !xe->info.has_usm))
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))