	if (xe_bo_is_vram(pt))
		xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
	else
-		xe_res_first_sg(xe_bo_get_sg(pt), 0, pt->size, &src_it);
+		xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
	emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt),
		 &src_it, XE_PAGE_SIZE, pt);
	return 0;
}
-struct sg_table *xe_bo_get_sg(struct xe_bo *bo)
+struct sg_table *xe_bo_sg(struct xe_bo *bo)
{
	struct ttm_tt *tt = bo->ttm.ttm;
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
		xe_assert(xe, bo->ttm.ttm);
-		xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
+		xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
				page_size, &cur);
		return xe_res_dma(&cur) + offset;
	} else {
void xe_bo_put_commit(struct llist_head *deferred);
-struct sg_table *xe_bo_get_sg(struct xe_bo *bo);
+struct sg_table *xe_bo_sg(struct xe_bo *bo);
/*
* xe_sg_segment_size() - Provides upper limit for sg segment size.
		return ERR_PTR(-EINVAL);
	if (!src_is_vram)
-		xe_res_first_sg(xe_bo_get_sg(src_bo), 0, size, &src_it);
+		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
	else
		xe_res_first(src, 0, size, &src_it);
	if (!dst_is_vram)
-		xe_res_first_sg(xe_bo_get_sg(dst_bo), 0, size, &dst_it);
+		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
	else
		xe_res_first(dst, 0, size, &dst_it);
	if (copy_system_ccs)
-		xe_res_first_sg(xe_bo_get_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
+		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
				&ccs_it);
	int pass = 0;
	if (!clear_vram)
-		xe_res_first_sg(xe_bo_get_sg(bo), 0, bo->size, &src_it);
+		xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
	else
		xe_res_first(src, 0, bo->size, &src_it);
			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
				     xe_vma_size(vma), &curs);
		else
-			xe_res_first_sg(xe_bo_get_sg(bo), xe_vma_bo_offset(vma),
+			xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
					xe_vma_size(vma), &curs);
	} else {
		curs.size = xe_vma_size(vma);
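
Not part of the patch, but for reference: every call site touched above follows the same shape, using xe_res_first() on the TTM resource for VRAM/stolen placement and xe_res_first_sg() on the renamed xe_bo_sg() for system memory. Below is a minimal sketch of that shared pattern, assuming only the helpers already visible in this diff; the wrapper name xe_bo_cursor_first() is hypothetical and does not exist in the driver.

/*
 * Hypothetical helper (not in the driver): start an xe_res_cursor walk
 * over a BO's backing store, choosing the cursor entry point the same
 * way the hunks above do.
 */
static void xe_bo_cursor_first(struct xe_bo *bo, u64 offset, u64 size,
			       struct xe_res_cursor *cur)
{
	if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
		/* VRAM/stolen BOs: iterate the TTM resource directly. */
		xe_res_first(bo->ttm.resource, offset, size, cur);
	else
		/* System-memory BOs: iterate the sg_table returned by xe_bo_sg(). */
		xe_res_first_sg(xe_bo_sg(bo), offset, size, cur);
}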