xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
0xdeaddeadbeefbeef);
expected = xe_pte_encode(NULL, pt, 0, XE_CACHE_WB, 0);
- if (m->eng->vm->flags & XE_VM_FLAGS_64K)
+ if (m->eng->vm->flags & XE_VM_FLAG_64K)
expected |= XE_PTE_PS64;
if (xe_bo_is_vram(pt))
xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
- if (vm->flags & XE_VM_FLAGS_64K)
+ if (vm->flags & XE_VM_FLAG_64K)
i += 16;
else
i += 1;
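
A note on the stride above: XE_PAGE_SIZE is the 4K GTT granule and each
PTE is a u64, so a single 64K page spans 16 consecutive PTE slots, which
is where the i += 16 step comes from. A minimal standalone sketch of that
arithmetic (constants restated here for illustration, not pulled from the
driver headers):

    #include <stdio.h>

    #define SZ_4K  0x1000u   /* XE_PAGE_SIZE: 4K GTT granule */
    #define SZ_64K 0x10000u  /* XE_64K_PAGE_SIZE */

    /* PTE slots consumed per mapped page: 16 on 64K platforms, else 1. */
    static unsigned int pte_slot_stride(int vm_is_64k)
    {
        return vm_is_64k ? SZ_64K / SZ_4K : 1;
    }

    int main(void)
    {
        printf("stride = %u slots\n", pte_slot_stride(1)); /* 16 */
        return 0;
    }
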
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
for (i = 0; i < batch->size;
- i += vm->flags & XE_VM_FLAGS_64K ? XE_64K_PAGE_SIZE :
+ i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = xe_pte_encode(NULL, batch, i,
XE_CACHE_WB, 0);
for (level = 1; level < num_level; level++) {
u32 flags = 0;
- if (vm->flags & XE_VM_FLAGS_64K && level == 1)
+ if (vm->flags & XE_VM_FLAG_64K && level == 1)
flags = XE_PDE_64K;
entry = xe_pde_encode(bo, map_ofs + (level - 1) *
addr = xe_res_dma(cur) & PAGE_MASK;
if (is_vram) {
/* Is this a 64K PTE entry? */
- if ((m->eng->vm->flags & XE_VM_FLAGS_64K) &&
+ if ((m->eng->vm->flags & XE_VM_FLAG_64K) &&
!(cur_ofs & (16 * 8 - 1))) {
XE_WARN_ON(!IS_ALIGNED(addr, SZ_64K));
addr |= XE_PTE_PS64;
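
The 16 * 8 - 1 mask in this hunk is the byte size of a 16-entry PTE group
minus one: with 8-byte entries, a compacted 64K PTE may only start on a
128-byte boundary within the page table, and the backing DMA address must
itself be 64K-aligned before XE_PTE_PS64 is set. A hedged restatement of
both conditions as a standalone predicate (the helper name is
hypothetical, not from the driver):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SZ_64K 0x10000ull

    /* cur_ofs: byte offset of the next PTE slot inside the PT page. */
    static bool can_emit_64k_pte(uint32_t cur_ofs, uint64_t dma_addr)
    {
        if (cur_ofs & (16 * 8 - 1))        /* not at a 16-entry boundary */
            return false;
        return !(dma_addr & (SZ_64K - 1)); /* 64K-aligned backing address */
    }

    int main(void)
    {
        printf("%d %d\n", can_emit_64k_pte(0, 0x10000),
               can_emit_64k_pte(8, 0x10000)); /* prints: 1 0 */
        return 0;
    }
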
* platforms where 64K pages are needed for VRAM.
*/
flags = XE_BO_CREATE_PINNED_BIT;
- if (vm->flags & XE_VM_FLAGS_64K)
+ if (vm->flags & XE_VM_FLAG_64K)
flags |= XE_BO_CREATE_SYSTEM_BIT;
else
flags |= XE_BO_CREATE_VRAM_IF_DGFX(tile);
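
The placement choice above is worth a remark: page-table BOs are written
at 4K granularity, which presumably cannot be honored by VRAM on
platforms that require 64K VRAM pages, so such VMs keep their page tables
in system memory instead. A minimal sketch of the selection, with
invented stand-in flag values (the real XE_BO_CREATE_* bits live in the
driver headers):

    /* Illustrative stand-ins, not the driver's actual bit values. */
    #define BO_PINNED       (1u << 0)
    #define BO_SYSTEM       (1u << 1)
    #define BO_VRAM_IF_DGFX (1u << 2)
    #define VM_FLAG_64K     (1u << 3)

    static unsigned int pt_bo_placement(unsigned int vm_flags)
    {
        unsigned int flags = BO_PINNED;

        /* 64K-only VRAM: fall back to system memory for 4K PT pages. */
        if (vm_flags & VM_FLAG_64K)
            flags |= BO_SYSTEM;
        else
            flags |= BO_VRAM_IF_DGFX;
        return flags;
    }
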
.va_curs_start = xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
- .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAGS_64K) && is_vram,
+ .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_vram,
};
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
int ret;
goto err_close;
if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
- vm->flags |= XE_VM_FLAGS_64K;
+ vm->flags |= XE_VM_FLAG_64K;
for_each_tile(tile, xe, id) {
if (flags & XE_VM_FLAG_MIGRATION &&
- tile->id != XE_VM_FLAG_GT_ID(flags))
+ tile->id != XE_VM_FLAG_TILE_ID(flags))
continue;
vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
{
int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
- XE_VM_FLAG_GT_ID(vm->flags) : 0;
+ XE_VM_FLAG_TILE_ID(vm->flags) : 0;
/* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
return &vm->pt_root[idx]->bo->ttm;
 * @flags: flags for this VM, statically set up at creation time, aside
 * from XE_VM_FLAG_BANNED, which requires vm->lock to set / read safely
*/
-#define XE_VM_FLAGS_64K BIT(0)
+#define XE_VM_FLAG_64K BIT(0)
#define XE_VM_FLAG_COMPUTE_MODE BIT(1)
#define XE_VM_FLAG_ASYNC_BIND_OPS BIT(2)
#define XE_VM_FLAG_MIGRATION BIT(3)
#define XE_VM_FLAG_SCRATCH_PAGE BIT(4)
#define XE_VM_FLAG_FAULT_MODE BIT(5)
#define XE_VM_FLAG_BANNED BIT(6)
-#define XE_VM_FLAG_GT_ID(flags) (((flags) >> 7) & 0x3)
+#define XE_VM_FLAG_TILE_ID(flags) (((flags) >> 7) & 0x3)
#define XE_VM_FLAG_SET_TILE_ID(tile) ((tile)->id << 7)
unsigned long flags;
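
For reference, the tile id is a 2-bit field occupying bits 7..8 of the
flags word, so ids 0..3 round-trip through the setter/getter pair. A
standalone sanity check of that layout (macros restated below, with
SET_TILE_ID taking a plain id rather than a struct xe_tile pointer as in
the header above):

    #include <assert.h>
    #include <stdio.h>

    #define BIT(n)                     (1ul << (n))
    #define XE_VM_FLAG_MIGRATION       BIT(3)
    #define XE_VM_FLAG_TILE_ID(flags)  (((flags) >> 7) & 0x3)
    #define XE_VM_FLAG_SET_TILE_ID(id) ((unsigned long)(id) << 7)

    int main(void)
    {
        for (unsigned long id = 0; id < 4; id++) {
            unsigned long flags = XE_VM_FLAG_MIGRATION |
                                  XE_VM_FLAG_SET_TILE_ID(id);
            assert(XE_VM_FLAG_TILE_ID(flags) == id);
        }
        printf("tile ids 0..3 round-trip through bits 7..8\n");
        return 0;
    }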