 		void *new_addr = gt->mem.vram.mapping +
 			(new_mem->start << PAGE_SHIFT);
 
+		if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
+			ret = -EINVAL;
+			xe_device_mem_access_put(xe);
+			goto out;
+		}
+
 		XE_BUG_ON(new_mem->start !=
 			  bo->placements->fpfn);
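With non-contiguous VRAM allocations now reporting XE_BO_INVALID_OFFSET (see the vram_mgr hunk below), new_mem->start can no longer be trusted unconditionally, so the move path bails out before deriving a CPU-visible address from it. The sentinel itself is not shown in this excerpt; a minimal sketch, assuming it is defined in xe_bo.h with a value along these lines:

/*
 * Assumed definition (not part of this excerpt): a start offset that can
 * never be a valid page offset, marking resources with no single
 * physical start.
 */
#define XE_BO_INVALID_OFFSET	LONG_MAX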
 	struct xe_ttm_vram_mgr_resource *vres;
 	u64 size, remaining_size, lpfn, fpfn;
 	struct drm_buddy *mm = &mgr->mm;
-	struct drm_buddy_block *block;
 	unsigned long pages_per_block;
 	int r;
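Dropping the function-scope block pointer narrows its lifetime to the one branch that still needs it, added further down. For orientation, these locals presumably sit at the top of the manager's alloc hook; TTM's alloc vfunc has this shape (a sketch under that assumption, parameter names ours):

/*
 * Sketch: the ttm_resource_manager_func.alloc signature this function is
 * assumed to implement.
 */
static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource **res);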
 		list_splice_tail(trim_list, &vres->blocks);
 	}
 
-	vres->base.start = 0;
-	list_for_each_entry(block, &vres->blocks, link) {
-		unsigned long start;
+	if (!(vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) &&
+	    xe_is_vram_mgr_blocks_contiguous(mm, &vres->blocks))
+		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
-		start = drm_buddy_block_offset(block) +
-			drm_buddy_block_size(mm, block);
-		start >>= PAGE_SHIFT;
+	/*
+	 * For some kernel objects we still rely on the start when io mapping
+	 * the object.
+	 */
+	if (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) {
+		struct drm_buddy_block *block = list_first_entry(&vres->blocks,
+								 typeof(*block),
+								 link);
 
-		if (start > PFN_UP(vres->base.size))
-			start -= PFN_UP(vres->base.size);
-		else
-			start = 0;
-		vres->base.start = max(vres->base.start, start);
+		vres->base.start = drm_buddy_block_offset(block) >> PAGE_SHIFT;
+	} else {
+		vres->base.start = XE_BO_INVALID_OFFSET;
 	}
 
-	if (xe_is_vram_mgr_blocks_contiguous(mm, &vres->blocks))
-		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
-
 	*res = &vres->base;
 	return 0;
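The contiguity promotion above relies on xe_is_vram_mgr_blocks_contiguous(), whose body is not part of this excerpt. Judging from the analogous amdgpu helper, it presumably walks the buddy-block list and verifies that each block ends exactly where its successor begins; a sketch under that assumption:

static inline bool xe_is_vram_mgr_blocks_contiguous(struct drm_buddy *mm,
						    struct list_head *head)
{
	struct drm_buddy_block *block;
	u64 start, size;

	block = list_first_entry_or_null(head, struct drm_buddy_block, link);
	if (!block)
		return false;

	/*
	 * Walk the list pairwise; any gap between a block's end and the
	 * next block's offset means the allocation is not contiguous.
	 */
	while (head != block->link.next) {
		start = drm_buddy_block_offset(block);
		size = drm_buddy_block_size(mm, block);

		block = list_entry(block->link.next, struct drm_buddy_block,
				   link);
		if (start + size != drm_buddy_block_offset(block))
			return false;
	}

	return true;
}

Note the walk only runs when TTM_PL_FLAG_CONTIGUOUS was not already requested, so a scattered request that happens to come back contiguous still gains the flag and a valid start offset; everything else is marked XE_BO_INVALID_OFFSET and caught by the io-mapping guard above.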