 * space to accommodate the platform-specific hardware prefetch
* requirements.
*/
- bb->bo = xe_sa_bo_new(!usm ? &gt->kernel_bb_pool : &gt->usm.bb_pool,
+ bb->bo = xe_sa_bo_new(!usm ? gt->kernel_bb_pool : gt->usm.bb_pool,
4 * (dwords + 1) + bb_prefetch(gt));
if (IS_ERR(bb->bo)) {
err = PTR_ERR(bb->bo);
if (IS_ERR(bb))
return PTR_ERR(bb);
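For reference, the size passed to xe_sa_bo_new() above is 4 bytes per requested dword, plus one extra dword for the MI_BATCH_BUFFER_END terminator, plus the platform-specific prefetch padding mentioned in the comment. A minimal sketch of that arithmetic (bb_alloc_size and prefetch_bytes are illustrative names, not driver symbols):

/*
 * Illustrative only: how the suballocation size for a batch buffer is
 * derived.  prefetch_bytes stands in for whatever bb_prefetch(gt)
 * reports for the platform.
 */
static u32 bb_alloc_size(u32 dwords, u32 prefetch_bytes)
{
	/* 4 bytes per dword, +1 dword for MI_BATCH_BUFFER_END, plus prefetch pad */
	return 4 * (dwords + 1) + prefetch_bytes;
}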
- batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool.bo);
+ batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool->bo);
job = xe_bb_create_wa_job(e, bb, batch_ofs);
if (IS_ERR(job)) {
xe_bb_free(bb, NULL);
}
}
- batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool.bo);
+ batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool->bo);
job = xe_bb_create_wa_job(e, bb, batch_ofs);
if (IS_ERR(job)) {
xe_bb_free(bb, NULL);
if (err)
goto err_force_wake;
- /*
- * FIXME: This should be ok as SA should only be used by gt->migrate and
- * vm->gt->migrate and both should be pointing to a non-media GT. But to
- * realy safe, convert gt->kernel_bb_pool to a pointer and point a media
- * GT to the kernel_bb_pool on a real tile.
- */
if (!xe_gt_is_media_type(gt)) {
- err = xe_sa_bo_manager_init(gt, &gt->kernel_bb_pool, SZ_1M, 16);
- if (err)
+ gt->kernel_bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
+ if (IS_ERR(gt->kernel_bb_pool)) {
+ err = PTR_ERR(gt->kernel_bb_pool);
goto err_force_wake;
+ }
/*
 * USM has its own SA pool so it doesn't block behind user operations
*/
if (gt_to_xe(gt)->info.supports_usm) {
- err = xe_sa_bo_manager_init(gt, &gt->usm.bb_pool,
- SZ_1M, 16);
- if (err)
+ gt->usm.bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
+ if (IS_ERR(gt->usm.bb_pool)) {
+ err = PTR_ERR(gt->usm.bb_pool);
goto err_force_wake;
+ }
}
+ } else {
+ struct xe_gt *full_gt = xe_find_full_gt(gt);
+
+ /*
+ * Media GT's kernel_bb_pool is only used while recording the
+ * default context during GT init. The USM pool should never
+ * be needed on the media GT.
+ */
+ gt->kernel_bb_pool = full_gt->kernel_bb_pool;
}
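The else-branch above is what lets the removed FIXME go away: a media GT never owns a pool, it simply aliases the full GT's kernel_bb_pool, and it has no USM pool at all. A minimal sketch of the resulting invariant, assuming the full GT is initialized before its media GT (pools_shared is an illustrative helper, not a driver symbol):

/*
 * Illustrative only: after init, suballocations made through either GT
 * come out of the same backing buffer object.
 */
static bool pools_shared(struct xe_gt *media_gt)
{
	struct xe_gt *full_gt = xe_find_full_gt(media_gt);

	return media_gt->kernel_bb_pool == full_gt->kernel_bb_pool &&
	       media_gt->kernel_bb_pool->bo == full_gt->kernel_bb_pool->bo;
}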
if (!xe_gt_is_media_type(gt)) {
struct xe_gt *gt = node_to_gt(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- drm_suballoc_dump_debug_info(&gt->kernel_bb_pool.base, &p,
- gt->kernel_bb_pool.gpu_addr);
+ drm_suballoc_dump_debug_info(&gt->kernel_bb_pool->base, &p,
+ gt->kernel_bb_pool->gpu_addr);
return 0;
}
* behind any user operations which may have resulted in a
* fault.
*/
- struct xe_sa_manager bb_pool;
+ struct xe_sa_manager *bb_pool;
/**
* @reserved_bcs_instance: reserved BCS instance used for USM
 * operations (e.g. migrations, fixing page tables)
/** @hw_engines: hardware engines on the GT */
struct xe_hw_engine hw_engines[XE_NUM_HW_ENGINES];
- /** @kernel_bb_pool: Pool from which batchbuffers are allocated */
- struct xe_sa_manager kernel_bb_pool;
+ /**
+ * @kernel_bb_pool: Pool from which batchbuffers are allocated.
+ *
+ * Media GT shares a pool with its primary GT.
+ */
+ struct xe_sa_manager *kernel_bb_pool;
/** @migrate: Migration helper for vram blits and clearing */
struct xe_migrate *migrate;
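With both members now pointers, consumers dereference them directly rather than taking the address of an embedded manager. A caller-side sketch (emit_example is an illustrative name, not a driver function):

/*
 * Illustrative only: pick whichever pool matches the caller and
 * suballocate from it through the new pointer members.
 */
static struct drm_suballoc *emit_example(struct xe_gt *gt, bool usm, u32 bytes)
{
	struct xe_sa_manager *pool = usm ? gt->usm.bb_pool : gt->kernel_bb_pool;

	return xe_sa_bo_new(pool, bytes);
}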
u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
u32 map_ofs, level, i;
struct xe_device *xe = gt_to_xe(m->gt);
- struct xe_bo *bo, *batch = gt->kernel_bb_pool.bo;
+ struct xe_bo *bo, *batch = gt->kernel_bb_pool->bo;
u64 entry;
int ret;
m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
if (xe->info.supports_usm) {
- batch = gt->usm.bb_pool.bo;
+ batch = gt->usm.bb_pool->bo;
batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE,
&is_vram);
m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
sa_manager->bo = NULL;
}
-int xe_sa_bo_manager_init(struct xe_gt *gt,
- struct xe_sa_manager *sa_manager,
- u32 size, u32 align)
+struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 align)
{
struct xe_device *xe = gt_to_xe(gt);
u32 managed_size = size - SZ_4K;
struct xe_bo *bo;
+ int ret;
+
+ struct xe_sa_manager *sa_manager = drmm_kzalloc(&gt_to_xe(gt)->drm,
+ sizeof(*sa_manager),
+ GFP_KERNEL);
+ if (!sa_manager)
+ return ERR_PTR(-ENOMEM);
sa_manager->bo = NULL;
if (IS_ERR(bo)) {
drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
PTR_ERR(bo));
- return PTR_ERR(bo);
+ return (struct xe_sa_manager *)bo;
}
sa_manager->bo = bo;
if (!sa_manager->cpu_ptr) {
xe_bo_unpin_map_no_vm(sa_manager->bo);
sa_manager->bo = NULL;
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
} else {
sa_manager->cpu_ptr = bo->vmap.vaddr;
memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
}
- return drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
- sa_manager);
+ ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
+ sa_manager);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sa_manager;
}
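For callers, the contract of the reworked function is: a valid manager or an ERR_PTR comes back, and teardown is tied to the drm device via drmm_add_action_or_reset(), so no explicit fini call is needed. A caller-side sketch (example_init_pool is an illustrative name):

/*
 * Illustrative only: typical error handling against the new
 * pointer-returning xe_sa_bo_manager_init().
 */
static int example_init_pool(struct xe_gt *gt)
{
	struct xe_sa_manager *pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	gt->kernel_bb_pool = pool;
	return 0;	/* managed teardown; the caller never frees it */
}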
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
struct xe_bo;
struct xe_gt;
-int xe_sa_bo_manager_init(struct xe_gt *gt,
- struct xe_sa_manager *sa_manager,
- u32 size, u32 align);
+struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 align);
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
u32 size);