/* Map the entire BO in our level 0 pt */
for (i = 0, level = 0; i < num_entries; level++) {
- entry = xe_pte_encode(NULL, bo, i * XE_PAGE_SIZE,
- XE_CACHE_WB, 0);
+ entry = xe_pte_encode(bo, i * XE_PAGE_SIZE, XE_CACHE_WB, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
for (i = 0; i < batch->size;
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
- entry = xe_pte_encode(NULL, batch, i,
- XE_CACHE_WB, 0);
+ entry = xe_pte_encode(batch, i, XE_CACHE_WB, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
entry);
BUG_ON(pt_bo->size != SZ_4K);
- addr = xe_pte_encode(NULL, pt_bo, 0, XE_CACHE_WB, 0);
+ addr = xe_pte_encode(pt_bo, 0, XE_CACHE_WB, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
}
return pde;
}
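A note on the stride in the batch-mapping loop above: each iteration writes one level-0 entry and advances i by one page, so the entry count scales inversely with the page size. A rough sketch, assuming XE_PAGE_SIZE is SZ_4K and XE_64K_PAGE_SIZE is SZ_64K as the names suggest (illustrative only, not part of the patch):

	u64 stride = vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
		XE_PAGE_SIZE;
	u64 nents = DIV_ROUND_UP(batch->size, stride);
	/* e.g. a 1 MiB batch: 16 entries at 64K vs 256 at 4K */
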
-static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
- size_t page_size, bool *is_vram)
-{
- if (xe_vma_is_null(vma)) {
- *is_vram = 0;
- return 0;
- }
-
- if (xe_vma_is_userptr(vma)) {
- struct xe_res_cursor cur;
- u64 page;
-
- *is_vram = false;
- page = offset >> PAGE_SHIFT;
- offset &= (PAGE_SIZE - 1);
-
- xe_res_first_sg(vma->userptr.sg, page << PAGE_SHIFT, page_size,
- &cur);
- return xe_res_dma(&cur) + offset;
- } else {
- return xe_bo_addr(xe_vma_bo(vma), offset, page_size, is_vram);
- }
-}
-
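The dropped vma_addr() covered three cases: null VMAs (report !is_vram, address 0), userptr VMAs (walk the SG table with a res cursor), and BO-backed VMAs (defer to xe_bo_addr()). Only the BO path survives in xe_pte_encode() below; vma-aware encoding stays behind __pte_encode(), which still takes the vma. For reference, the userptr split above is plain page arithmetic, e.g. with 4K pages:

	/* offset = 0x12345, PAGE_SHIFT = 12:
	 *   page           = 0x12345 >> 12    -> 0x12
	 *   in-page offset = 0x12345 & 0xfff  -> 0x345
	 * result: xe_res_dma(&cur) + 0x345
	 */
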
static u64 __pte_encode(u64 pte, enum xe_cache_level cache,
struct xe_vma *vma, u32 pt_level)
{
/**
* xe_pte_encode() - Encode a page-table entry pointing to memory.
- * @vma: The vma representing the memory to point to.
- * @bo: If @vma is NULL, representing the memory to point to.
- * @offset: The offset into @vma or @bo.
+ * @bo: The BO representing the memory to point to.
+ * @offset: The offset into @bo.
* @cache: The cache level indicating the caching of @bo.
* @pt_level: The page-table level of the page-table into which the entry
* is to be inserted.
*
* Return: An encoded page-table entry. No errors.
*/
-u64 xe_pte_encode(struct xe_vma *vma, struct xe_bo *bo,
- u64 offset, enum xe_cache_level cache,
+u64 xe_pte_encode(struct xe_bo *bo, u64 offset, enum xe_cache_level cache,
u32 pt_level)
{
u64 pte;
bool is_vram;
- if (vma)
- pte = vma_addr(vma, offset, XE_PAGE_SIZE, &is_vram);
- else
- pte = xe_bo_addr(bo, offset, XE_PAGE_SIZE, &is_vram);
-
- if (is_vram) {
+ pte = xe_bo_addr(bo, offset, XE_PAGE_SIZE, &is_vram);
+ if (is_vram)
pte |= XE_PPGTT_PTE_LM;
- if (vma && vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT)
- pte |= XE_USM_PPGTT_PTE_AE;
- }
- return __pte_encode(pte, cache, vma, pt_level);
+ return __pte_encode(pte, cache, NULL, pt_level);
}
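Net effect at the call sites, as the hunks at the top show: the leading NULL drops out and the BO becomes the first argument. A before/after sketch (ofs is just an illustrative offset):

	entry = xe_pte_encode(NULL, bo, ofs, XE_CACHE_WB, 0);	/* before */
	entry = xe_pte_encode(bo, ofs, XE_CACHE_WB, 0);		/* after */
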
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
return 0;
if (level == 0) {
- u64 empty = xe_pte_encode(NULL, vm->scratch_bo[id], 0,
+ u64 empty = xe_pte_encode(vm->scratch_bo[id], 0,
XE_CACHE_WB, 0);
return empty;