drm/i915: Wrap all access to i915_vma.node.start|size
author Chris Wilson <chris@chris-wilson.co.uk>
Wed, 30 Nov 2022 23:58:02 +0000 (00:58 +0100)
committer Andi Shyti <andi.shyti@linux.intel.com>
Tue, 6 Dec 2022 09:52:42 +0000 (10:52 +0100)
We already wrap i915_vma.node.start for use with the GGTT, as there we
can perform additional sanity checks that the node belongs to the GGTT
and fits within the 32b registers. In the next couple of patches, we
will introduce guard pages around the objects _inside_ the drm_mm_node
allocation. That is, we will offset the vma->pages so that the first
page is at drm_mm_node.start + vma->guard (not 0, as is currently the
case). Users must then no longer use i915_vma.node.start directly, but
must account for the guard offset; all users are therefore converted
to an i915_vma_offset() wrapper.

The notable exceptions are the selftests that test the exact behaviour
of i915_vma_pin/i915_vma_insert.
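
As an illustration of where this is headed, a minimal sketch of what
the new wrappers are assumed to become once the guard pages land
(vma->guard is the field expected from the follow-up patch; in this
patch both wrappers still return the raw node values):

    /* Internal use only: raw drm_mm_node offset adjusted for the guard. */
    static inline u64 __i915_vma_offset(const struct i915_vma *vma)
    {
            /* The actual start of the vma->pages is after the guard pages. */
            return vma->node.start + vma->guard;
    }

    static inline u64 __i915_vma_size(const struct i915_vma *vma)
    {
            /* The usable range excludes the guard pages at either end. */
            return vma->node.size - 2 * vma->guard;
    }

Anything that programs the GPU with the address or size of the bound
object should therefore go through i915_vma_offset()/i915_vma_size()
(which additionally assert that the node is allocated) instead of
peeking at vma->node directly.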

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tejas Upadhyay <tejaskumarx.surendrakumar.upadhyay@intel.com>
Co-developed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221130235805.221010-3-andi.shyti@linux.intel.com
29 files changed:
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/i915_gem_tiling.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
drivers/gpu/drm/i915/gt/gen7_renderclear.c
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
drivers/gpu/drm/i915/gt/intel_renderstate.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
drivers/gpu/drm/i915/gt/selftest_execlists.c
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
drivers/gpu/drm/i915/gt/selftest_rps.c
drivers/gpu/drm/i915/gt/selftest_workarounds.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/i915_vma_resource.h
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/igt_spinner.c

diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index ab385d18ddcc37708fd381401e0cef1c8f34fc2a..05c3d4739c85bc708a4437b77374835a548d15b9 100644
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
                /* Our framebuffer is the entirety of fbdev's system memory */
                info->fix.smem_start =
-                       (unsigned long)(ggtt->gmadr.start + vma->node.start);
+                       (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
                info->fix.smem_len = vma->size;
        }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7d742f7e52b8cadcbe1633ff50d0623e0745cea7..16618a548026650775f18a38aca4aafb2d2b70ea 100644
@@ -378,22 +378,25 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
                 const struct i915_vma *vma,
                 unsigned int flags)
 {
-       if (vma->node.size < entry->pad_to_size)
+       const u64 start = i915_vma_offset(vma);
+       const u64 size = i915_vma_size(vma);
+
+       if (size < entry->pad_to_size)
                return true;
 
-       if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
+       if (entry->alignment && !IS_ALIGNED(start, entry->alignment))
                return true;
 
        if (flags & EXEC_OBJECT_PINNED &&
-           vma->node.start != entry->offset)
+           start != entry->offset)
                return true;
 
        if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
-           vma->node.start < BATCH_OFFSET_BIAS)
+           start < BATCH_OFFSET_BIAS)
                return true;
 
        if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
-           (vma->node.start + vma->node.size + 4095) >> 32)
+           (start + size + 4095) >> 32)
                return true;
 
        if (flags & __EXEC_OBJECT_NEEDS_MAP &&
@@ -439,7 +442,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
        int err;
 
        if (vma->node.size)
-               pin_flags = vma->node.start;
+               pin_flags = __i915_vma_offset(vma);
        else
                pin_flags = entry->offset & PIN_OFFSET_MASK;
 
@@ -662,8 +665,8 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
        if (err)
                return err;
 
-       if (entry->offset != vma->node.start) {
-               entry->offset = vma->node.start | UPDATE;
+       if (entry->offset != i915_vma_offset(vma)) {
+               entry->offset = i915_vma_offset(vma) | UPDATE;
                eb->args->flags |= __EXEC_HAS_RELOC;
        }
 
@@ -983,8 +986,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
                        return err;
 
                if (!err) {
-                       if (entry->offset != vma->node.start) {
-                               entry->offset = vma->node.start | UPDATE;
+                       if (entry->offset != i915_vma_offset(vma)) {
+                               entry->offset = i915_vma_offset(vma) | UPDATE;
                                eb->args->flags |= __EXEC_HAS_RELOC;
                        }
                } else {
@@ -1065,7 +1068,7 @@ static inline u64
 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
                  const struct i915_vma *target)
 {
-       return gen8_canonical_addr((int)reloc->delta + target->node.start);
+       return gen8_canonical_addr((int)reloc->delta + i915_vma_offset(target));
 }
 
 static void reloc_cache_init(struct reloc_cache *cache,
@@ -1274,7 +1277,7 @@ static void *reloc_iomap(struct i915_vma *batch,
                        if (err) /* no inactive aperture space, use cpu reloc */
                                return NULL;
                } else {
-                       cache->node.start = vma->node.start;
+                       cache->node.start = i915_ggtt_offset(vma);
                        cache->node.mm = (void *)vma;
                }
        }
@@ -1437,7 +1440,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
         * more work needs to be done.
         */
        if (!DBG_FORCE_RELOC &&
-           gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+           gen8_canonical_addr(i915_vma_offset(target->vma)) == reloc->presumed_offset)
                return 0;
 
        /* Check that the relocation address is valid... */
@@ -2367,7 +2370,7 @@ static int eb_request_submit(struct i915_execbuffer *eb,
        }
 
        err = rq->context->engine->emit_bb_start(rq,
-                                                batch->node.start +
+                                                i915_vma_offset(batch) +
                                                 eb->batch_start_offset,
                                                 batch_len,
                                                 eb->batch_flags);
@@ -2378,7 +2381,7 @@ static int eb_request_submit(struct i915_execbuffer *eb,
                GEM_BUG_ON(intel_context_is_parallel(rq->context));
                GEM_BUG_ON(eb->batch_start_offset);
                err = rq->context->engine->emit_bb_start(rq,
-                                                        eb->trampoline->node.start +
+                                                        i915_vma_offset(eb->trampoline) +
                                                         batch_len, 0, 0);
                if (err)
                        return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c29efdef8313a5acffef16332d6cf261f4ad395e..d73ba0f5c4c52affc9947ddc8ddd0fd958f0eb8d 100644
@@ -395,7 +395,7 @@ retry:
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+                              (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
                               &ggtt->iomap);
        if (ret)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 8dc5c8874d8a2213d54f42dab891c5152bc995db..b1672e054b21e02d96c24ad70fd8a64c679b039c 100644
@@ -400,7 +400,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
        mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
        list_for_each_entry_safe(vma, next,
                                 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
-               unsigned long count = vma->node.size >> PAGE_SHIFT;
+               unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (!vma->iomap || i915_vma_is_active(vma))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index fd42b89b7162b7e6e8210266ced13ba592da4920..04bb909acdec837f46766ad1d34dadfb470b2e8a 100644
@@ -168,11 +168,11 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma,
                return true;
 
        size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
-       if (vma->node.size < size)
+       if (i915_vma_size(vma) < size)
                return false;
 
        alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
-       if (!IS_ALIGNED(vma->node.start, alignment))
+       if (!IS_ALIGNED(i915_ggtt_offset(vma), alignment))
                return false;
 
        return true;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index beaf27e09e8a9d6fdebe9046d242a303d43d6deb..e0c2ac9c8053a97bd1c60681b0fed4742b3dc19d 100644
@@ -400,7 +400,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
         * Maintaining alignment is required to utilise huge pages in the ppGGT.
         */
        if (i915_gem_object_is_lmem(obj) &&
-           IS_ALIGNED(vma->node.start, SZ_2M) &&
+           IS_ALIGNED(i915_vma_offset(vma), SZ_2M) &&
            vma->page_sizes.sg & SZ_2M &&
            vma->resource->page_sizes_gtt < SZ_2M) {
                pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 692a16914ca0fe81bb46693b78d7fab8d1058644..3bb1f7f0110e614a26a52bf683eaca91c9b2cf95 100644
@@ -194,12 +194,12 @@ static int prepare_blit(const struct tiled_blits *t,
                *cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
                *cs++ = 0;
                *cs++ = t->height << 16 | t->width;
-               *cs++ = lower_32_bits(dst->vma->node.start);
-               *cs++ = upper_32_bits(dst->vma->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
+               *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
                *cs++ = 0;
                *cs++ = src_pitch;
-               *cs++ = lower_32_bits(src->vma->node.start);
-               *cs++ = upper_32_bits(src->vma->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(src->vma));
+               *cs++ = upper_32_bits(i915_vma_offset(src->vma));
        } else {
                if (ver >= 6) {
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
@@ -240,14 +240,14 @@ static int prepare_blit(const struct tiled_blits *t,
                *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
                *cs++ = 0;
                *cs++ = t->height << 16 | t->width;
-               *cs++ = lower_32_bits(dst->vma->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
                if (use_64b_reloc)
-                       *cs++ = upper_32_bits(dst->vma->node.start);
+                       *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
                *cs++ = 0;
                *cs++ = src_pitch;
-               *cs++ = lower_32_bits(src->vma->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(src->vma));
                if (use_64b_reloc)
-                       *cs++ = upper_32_bits(src->vma->node.start);
+                       *cs++ = upper_32_bits(i915_vma_offset(src->vma));
        }
 
        *cs++ = MI_BATCH_BUFFER_END;
@@ -462,7 +462,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
 {
        int err;
 
-       if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
+       if (drm_mm_node_allocated(&vma->node) && i915_vma_offset(vma) != addr) {
                err = i915_vma_unbind_unlocked(vma);
                if (err)
                        return err;
@@ -472,6 +472,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
        if (err)
                return err;
 
+       GEM_BUG_ON(i915_vma_offset(vma) != addr);
        return 0;
 }
 
@@ -518,8 +519,8 @@ tiled_blit(struct tiled_blits *t,
                err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
        if (!err)
                err = rq->engine->emit_bb_start(rq,
-                                               t->batch->node.start,
-                                               t->batch->node.size,
+                                               i915_vma_offset(t->batch),
+                                               i915_vma_size(t->batch),
                                                0);
        i915_request_get(rq);
        i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index a0ff51d71d07aefe7ea8370fbc6af01b4fe8fd76..ac02fb0365920883d1d2e3834f667332636dab07 100644
@@ -914,8 +914,8 @@ static int rpcs_query_batch(struct drm_i915_gem_object *rpcs,
 
        *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
        *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE(engine->mmio_base));
-       *cmd++ = lower_32_bits(vma->node.start);
-       *cmd++ = upper_32_bits(vma->node.start);
+       *cmd++ = lower_32_bits(i915_vma_offset(vma));
+       *cmd++ = upper_32_bits(i915_vma_offset(vma));
        *cmd = MI_BATCH_BUFFER_END;
 
        __i915_gem_object_flush_map(rpcs, 0, 64);
@@ -999,7 +999,8 @@ retry:
        }
 
        err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
+                                       i915_vma_offset(batch),
+                                       i915_vma_size(batch),
                                        0);
        if (err)
                goto skip_request;
@@ -1560,7 +1561,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
                        goto skip_request;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+       err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+                                   i915_vma_size(vma), 0);
        if (err)
                goto skip_request;
 
@@ -1665,7 +1667,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
                *cmd++ = offset;
                *cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
                *cmd++ = reg;
-               *cmd++ = vma->node.start + result;
+               *cmd++ = i915_vma_offset(vma) + result;
                *cmd = MI_BATCH_BUFFER_END;
 
                i915_gem_object_flush_map(obj);
@@ -1694,7 +1696,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
                        goto skip_request;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+       err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+                                   i915_vma_size(vma), flags);
        if (err)
                goto skip_request;
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 80e7fdd5d1642790425c051bb4e5680910047e97..c04d317df96daad9fd9861049b468068109e0b7c 100644
@@ -1608,7 +1608,7 @@ retry:
 
                err = i915_vma_move_to_active(vma, rq, 0);
 
-               err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+               err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
                i915_request_get(rq);
                i915_request_add(rq);
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 374b10ac430e8fa49161d0c3007734e702b72efc..c147038230c4623b6b92d8c62e0219993da92970 100644
@@ -62,8 +62,8 @@ igt_emit_store_dw(struct i915_vma *vma,
                goto err;
        }
 
-       GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
-       offset += vma->node.start;
+       GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
+       offset += i915_vma_offset(vma);
 
        for (n = 0; n < count; n++) {
                if (ver >= 8) {
@@ -147,7 +147,8 @@ int igt_gpu_fill_dw(struct intel_context *ce,
                flags |= I915_DISPATCH_SECURE;
 
        err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
+                                       i915_vma_offset(batch),
+                                       i915_vma_size(batch),
                                        flags);
 
 skip_request:
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index 317efb145787ec69781805fb1967ff1dc21b4123..d38b914d1206944ab5f8bee1f79e1ccc1b38128c 100644
@@ -106,7 +106,7 @@ static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
 
 static u32 batch_addr(const struct batch_chunk *bc)
 {
-       return bc->vma->node.start;
+       return i915_vma_offset(bc->vma);
 }
 
 static void batch_add(struct batch_chunk *bc, const u32 d)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 995082d45cb21308bce3fa687360b6aa95a0d1d8..7ac8ed13e1fe30f5a02b09c82b5110957b7000e1 100644
@@ -220,7 +220,8 @@ static int fence_update(struct i915_fence_reg *fence,
                                return ret;
                }
 
-               fence->start = vma->node.start;
+               GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
+               fence->start = i915_ggtt_offset(vma);
                fence->size = vma->fence_size;
                fence->stride = i915_gem_object_get_stride(vma->obj);
                fence->tiling = i915_gem_object_get_tiling(vma->obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 9c1ae070ee7b9a847ea4dd798f3b26d89ec45e32..4b56ec3743cf375bbc40de41f39b143a4845138c 100644
@@ -63,7 +63,7 @@ static int render_state_setup(struct intel_renderstate *so,
                u32 s = rodata->batch[i];
 
                if (i * 4  == rodata->reloc[reloc_index]) {
-                       u64 r = s + so->vma->node.start;
+                       u64 r = s + i915_vma_offset(so->vma);
 
                        s = lower_32_bits(r);
                        if (HAS_64BIT_RELOC(i915)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index d5d6f1fadcae397ff1d060f47ce4a178f484aa73..7d6db5fc92451cc5647a7542487c8cc114b33caa 100644
@@ -895,7 +895,7 @@ static int clear_residuals(struct i915_request *rq)
        }
 
        ret = engine->emit_bb_start(rq,
-                                   engine->wa_ctx.vma->node.start, 0,
+                                   i915_vma_offset(engine->wa_ctx.vma), 0,
                                    0);
        if (ret)
                return ret;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 881b64f3e7b9920cfc1c10d7debfd6e736f0e3a9..542ce6d2de1922386c9096fe217bfb38686981e4 100644
@@ -178,7 +178,7 @@ static int perf_mi_bb_start(void *arg)
                                goto out;
 
                        err = rq->engine->emit_bb_start(rq,
-                                                       batch->node.start, 8,
+                                                       i915_vma_offset(batch), 8,
                                                        0);
                        if (err)
                                goto out;
@@ -321,7 +321,7 @@ static int perf_mi_noop(void *arg)
                                goto out;
 
                        err = rq->engine->emit_bb_start(rq,
-                                                       base->node.start, 8,
+                                                       i915_vma_offset(base), 8,
                                                        0);
                        if (err)
                                goto out;
@@ -331,8 +331,8 @@ static int perf_mi_noop(void *arg)
                                goto out;
 
                        err = rq->engine->emit_bb_start(rq,
-                                                       nop->node.start,
-                                                       nop->node.size,
+                                                       i915_vma_offset(nop),
+                                                       i915_vma_size(nop),
                                                        0);
                        if (err)
                                goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index ab2e9a6a24522ae9d1bc8e8bb651ab038b0bd297..a619057543fd9469b0b0c700dd37f1afd2460670 100644
@@ -2737,11 +2737,11 @@ static int create_gang(struct intel_engine_cs *engine,
                MI_SEMAPHORE_POLL |
                MI_SEMAPHORE_SAD_EQ_SDD;
        *cs++ = 0;
-       *cs++ = lower_32_bits(vma->node.start);
-       *cs++ = upper_32_bits(vma->node.start);
+       *cs++ = lower_32_bits(i915_vma_offset(vma));
+       *cs++ = upper_32_bits(i915_vma_offset(vma));
 
        if (*prev) {
-               u64 offset = (*prev)->batch->node.start;
+               u64 offset = i915_vma_offset((*prev)->batch);
 
                /* Terminate the spinner in the next lower priority batch. */
                *cs++ = MI_STORE_DWORD_IMM_GEN4;
@@ -2767,7 +2767,7 @@ static int create_gang(struct intel_engine_cs *engine,
        err = i915_vma_move_to_active(vma, rq, 0);
        if (!err)
                err = rq->engine->emit_bb_start(rq,
-                                               vma->node.start,
+                                               i915_vma_offset(vma),
                                                PAGE_SIZE, 0);
        i915_vma_unlock(vma);
        i915_request_add(rq);
@@ -3095,7 +3095,7 @@ create_gpr_user(struct intel_engine_cs *engine,
                *cs++ = MI_MATH_ADD;
                *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
 
-               addr = result->node.start + offset + i * sizeof(*cs);
+               addr = i915_vma_offset(result) + offset + i * sizeof(*cs);
                *cs++ = MI_STORE_REGISTER_MEM_GEN8;
                *cs++ = CS_GPR(engine, 2 * i);
                *cs++ = lower_32_bits(addr);
@@ -3105,8 +3105,8 @@ create_gpr_user(struct intel_engine_cs *engine,
                        MI_SEMAPHORE_POLL |
                        MI_SEMAPHORE_SAD_GTE_SDD;
                *cs++ = i;
-               *cs++ = lower_32_bits(result->node.start);
-               *cs++ = upper_32_bits(result->node.start);
+               *cs++ = lower_32_bits(i915_vma_offset(result));
+               *cs++ = upper_32_bits(i915_vma_offset(result));
        }
 
        *cs++ = MI_BATCH_BUFFER_END;
@@ -3186,7 +3186,7 @@ create_gpr_client(struct intel_engine_cs *engine,
                err = i915_vma_move_to_active(batch, rq, 0);
        if (!err)
                err = rq->engine->emit_bb_start(rq,
-                                               batch->node.start,
+                                               i915_vma_offset(batch),
                                                PAGE_SIZE, 0);
        i915_vma_unlock(batch);
        i915_vma_unpin(batch);
@@ -3518,7 +3518,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
                err = i915_vma_move_to_active(vma, rq, 0);
                if (!err)
                        err = rq->engine->emit_bb_start(rq,
-                                                       vma->node.start,
+                                                       i915_vma_offset(vma),
                                                        PAGE_SIZE, 0);
                i915_vma_unlock(vma);
        }
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index bc05ef48c194ae70c0261e653a5062aed220058f..8b0d84f2aad228a3e966eaf19edfd39d81b12c6c 100644
@@ -96,7 +96,8 @@ err_ctx:
 static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
 {
-       return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
+       return i915_vma_offset(hws) +
+              offset_in_page(sizeof(u32) * rq->fence.context);
 }
 
 static struct i915_request *
@@ -180,8 +181,8 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-               *batch++ = lower_32_bits(vma->node.start);
-               *batch++ = upper_32_bits(vma->node.start);
+               *batch++ = lower_32_bits(i915_vma_offset(vma));
+               *batch++ = upper_32_bits(i915_vma_offset(vma));
        } else if (GRAPHICS_VER(gt->i915) >= 6) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = 0;
@@ -194,7 +195,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 1 << 8;
-               *batch++ = lower_32_bits(vma->node.start);
+               *batch++ = lower_32_bits(i915_vma_offset(vma));
        } else if (GRAPHICS_VER(gt->i915) >= 4) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *batch++ = 0;
@@ -207,7 +208,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
-               *batch++ = lower_32_bits(vma->node.start);
+               *batch++ = lower_32_bits(i915_vma_offset(vma));
        } else {
                *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                *batch++ = lower_32_bits(hws_address(hws, rq));
@@ -219,7 +220,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 
                *batch++ = MI_NOOP;
                *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
-               *batch++ = lower_32_bits(vma->node.start);
+               *batch++ = lower_32_bits(i915_vma_offset(vma));
        }
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */
        intel_gt_chipset_flush(engine->gt);
@@ -234,7 +235,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
        if (GRAPHICS_VER(gt->i915) <= 5)
                flags |= I915_DISPATCH_SECURE;
 
-       err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+       err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
 
 cancel_rq:
        if (err) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 7c56ffd2c659773005704d00c19476b094a4bc19..a61ae9d7e0a220b715c84887fe892d1648c585bb 100644
@@ -1030,8 +1030,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
                while (len--) {
                        *cs++ = MI_STORE_REGISTER_MEM_GEN8;
                        *cs++ = hw[dw];
-                       *cs++ = lower_32_bits(scratch->node.start + x);
-                       *cs++ = upper_32_bits(scratch->node.start + x);
+                       *cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
+                       *cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
 
                        dw += 2;
                        x += 4;
@@ -1098,8 +1098,8 @@ record_registers(struct intel_context *ce,
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
        *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
-       *cs++ = lower_32_bits(b_before->node.start);
-       *cs++ = upper_32_bits(b_before->node.start);
+       *cs++ = lower_32_bits(i915_vma_offset(b_before));
+       *cs++ = upper_32_bits(i915_vma_offset(b_before));
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
        *cs++ = MI_SEMAPHORE_WAIT |
@@ -1114,8 +1114,8 @@ record_registers(struct intel_context *ce,
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
        *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
-       *cs++ = lower_32_bits(b_after->node.start);
-       *cs++ = upper_32_bits(b_after->node.start);
+       *cs++ = lower_32_bits(i915_vma_offset(b_after));
+       *cs++ = upper_32_bits(i915_vma_offset(b_after));
 
        intel_ring_advance(rq, cs);
 
@@ -1236,8 +1236,8 @@ static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
        *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
-       *cs++ = lower_32_bits(batch->node.start);
-       *cs++ = upper_32_bits(batch->node.start);
+       *cs++ = lower_32_bits(i915_vma_offset(batch));
+       *cs++ = upper_32_bits(i915_vma_offset(batch));
 
        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
        *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 70f9ac1ec2c76ffc7ca35a3c1ec1a51e7a6c59d8..87ceb0f374b6730802e32adc2298104d443a940a 100644
@@ -50,7 +50,7 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
        } else {
                *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
        }
-       *cs++ = vma->node.start + 4000;
+       *cs++ = i915_vma_offset(vma) + 4000;
        *cs++ = STACK_MAGIC;
 
        *cs++ = MI_BATCH_BUFFER_END;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 39f1b7564170c4874a89c6e7d86f26b719387ca1..6755bbc4ebdac01d9d4f96a5a1c30fb1044cc8d0 100644
@@ -122,14 +122,14 @@ create_spin_counter(struct intel_engine_cs *engine,
                if (srm) {
                        *cs++ = MI_STORE_REGISTER_MEM_GEN8;
                        *cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
-                       *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
-                       *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+                       *cs++ = lower_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
+                       *cs++ = upper_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
                }
        }
 
        *cs++ = MI_BATCH_BUFFER_START_GEN8;
-       *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
-       *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+       *cs++ = lower_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
+       *cs++ = upper_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
        GEM_BUG_ON(cs - base > end);
 
        i915_gem_object_flush_map(obj);
@@ -655,7 +655,7 @@ int live_rps_frequency_cs(void *arg)
                err = i915_vma_move_to_active(vma, rq, 0);
                if (!err)
                        err = rq->engine->emit_bb_start(rq,
-                                                       vma->node.start,
+                                                       i915_vma_offset(vma),
                                                        PAGE_SIZE, 0);
                i915_request_add(rq);
                if (err)
@@ -794,7 +794,7 @@ int live_rps_frequency_srm(void *arg)
                err = i915_vma_move_to_active(vma, rq, 0);
                if (!err)
                        err = rq->engine->emit_bb_start(rq,
-                                                       vma->node.start,
+                                                       i915_vma_offset(vma),
                                                        PAGE_SIZE, 0);
                i915_request_add(rq);
                if (err)
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 96e3861706d6f933a0a4675f27070d6c4719ec2f..9c5449709161c0930258487102e070e6bebd7da1 100644
@@ -521,7 +521,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                struct i915_gem_ww_ctx ww;
-               u64 addr = scratch->node.start;
+               u64 addr = i915_vma_offset(scratch);
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
@@ -640,7 +640,7 @@ retry:
                        goto err_request;
 
                err = engine->emit_bb_start(rq,
-                                           batch->node.start, PAGE_SIZE,
+                                           i915_vma_offset(batch), PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;
@@ -870,7 +870,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
        }
 
        for (i = 0; i < engine->whitelist.count; i++) {
-               u64 offset = results->node.start + sizeof(u32) * i;
+               u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
 
                /* Clear non priv flags */
@@ -942,7 +942,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
                goto err_request;
 
        /* Perform the writes from an unprivileged "user" batch */
-       err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+       err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);
 
 err_request:
        err = request_add_sync(rq, err);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f93e6122f247888e7682c4ec66fc1e34474eeb33..ddf49c2dbb917c5dc650dfd88f409727760e4ba0 100644
@@ -1471,8 +1471,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                /* Defer failure until attempted use */
                jump_whitelist = alloc_whitelist(batch_length);
 
-       shadow_addr = gen8_canonical_addr(shadow->node.start);
-       batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
+       shadow_addr = gen8_canonical_addr(i915_vma_offset(shadow));
+       batch_addr = gen8_canonical_addr(i915_vma_offset(batch) + batch_offset);
 
        /*
         * We use the batch length as size because the shadow object is as
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6c7ac73b69a5253fc1f80511fe8e19333dc1e62a..a356ca490159e3e8250330c48ecf73464c6559bf 100644
@@ -183,7 +183,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 
                seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
                           stringify_vma_type(vma),
-                          vma->node.start, vma->node.size,
+                          i915_vma_offset(vma), i915_vma_size(vma),
                           stringify_page_sizes(vma->resource->page_sizes_gtt,
                                                NULL, 0));
                if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index c2bee21a7df22f69d56182be7bc9d0fba7d96ce1..bee6f3b61819dc81242fbaf1296dfd5de24d2607 100644
@@ -2258,7 +2258,7 @@ retry:
                goto err_add_request;
 
        err = rq->engine->emit_bb_start(rq,
-                                       vma->node.start, 0,
+                                       i915_vma_offset(vma), 0,
                                        I915_DISPATCH_SECURE);
        if (err)
                goto err_add_request;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 726705b10637caad8c768d54644a2cc2cca3469f..fefee5fef38d3d885d1268b53bc2da2864d65a78 100644
@@ -418,8 +418,8 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
        i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
                               obj->mm.rsgt, i915_gem_object_is_readonly(obj),
                               i915_gem_object_is_lmem(obj), obj->mm.region,
-                              vma->ops, vma->private, vma->node.start,
-                              vma->node.size, vma->size);
+                              vma->ops, vma->private, __i915_vma_offset(vma),
+                              __i915_vma_size(vma), vma->size);
 }
 
 /**
@@ -447,7 +447,7 @@ int i915_vma_bind(struct i915_vma *vma,
 
        lockdep_assert_held(&vma->vm->mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-       GEM_BUG_ON(vma->size > vma->node.size);
+       GEM_BUG_ON(vma->size > i915_vma_size(vma));
 
        if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
                                              vma->node.size,
@@ -569,8 +569,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
                                                          vma->obj->base.size);
                } else if (i915_vma_is_map_and_fenceable(vma)) {
                        ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
-                                               vma->node.start,
-                                               vma->node.size);
+                                               i915_vma_offset(vma),
+                                               i915_vma_size(vma));
                } else {
                        ptr = (void __iomem *)
                                i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
@@ -659,22 +659,22 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
        if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
                return true;
 
-       if (vma->node.size < size)
+       if (i915_vma_size(vma) < size)
                return true;
 
        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
-       if (alignment && !IS_ALIGNED(vma->node.start, alignment))
+       if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
                return true;
 
        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;
 
        if (flags & PIN_OFFSET_BIAS &&
-           vma->node.start < (flags & PIN_OFFSET_MASK))
+           i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
                return true;
 
        if (flags & PIN_OFFSET_FIXED &&
-           vma->node.start != (flags & PIN_OFFSET_MASK))
+           i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
                return true;
 
        return false;
@@ -687,10 +687,11 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);
 
-       fenceable = (vma->node.size >= vma->fence_size &&
-                    IS_ALIGNED(vma->node.start, vma->fence_alignment));
+       fenceable = (i915_vma_size(vma) >= vma->fence_size &&
+                    IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
 
-       mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
+       mappable = i915_ggtt_offset(vma) + vma->fence_size <=
+                  i915_vm_to_ggtt(vma->vm)->mappable_end;
 
        if (mappable && fenceable)
                set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 0757977a489bcc524ffe351c268c92d512a1fb3b..3fd4512b1f65f8061e329dc12c925e0378e0c894 100644
@@ -125,13 +125,58 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
        return !list_empty(&vma->closed_link);
 }
 
+/* Internal use only. */
+static inline u64 __i915_vma_size(const struct i915_vma *vma)
+{
+       return vma->node.size;
+}
+
+/**
+ * i915_vma_size - Obtain the va range size of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address range size
+ * with padding subtracted.
+ *
+ * Return: The effective virtual address range size.
+ */
+static inline u64 i915_vma_size(const struct i915_vma *vma)
+{
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+       return __i915_vma_size(vma);
+}
+
+/* Internal use only. */
+static inline u64 __i915_vma_offset(const struct i915_vma *vma)
+{
+       return vma->node.start;
+}
+
+/**
+ * i915_vma_offset - Obtain the va offset of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address offset the gpu
+ * should use to access the bound data.
+ *
+ * Return: The effective virtual address offset.
+ */
+static inline u64 i915_vma_offset(const struct i915_vma *vma)
+{
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+       return __i915_vma_offset(vma);
+}
+
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-       GEM_BUG_ON(upper_32_bits(vma->node.start));
-       GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
-       return lower_32_bits(vma->node.start);
+       GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma)));
+       GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma) +
+                                i915_vma_size(vma) - 1));
+       return lower_32_bits(i915_vma_offset(vma));
 }
 
 static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 06923d1816e7e70ffcb5d6c8d9b1770abbfa3304..54edf3739ca0bfc89319dfb1c66062a8988603a2 100644
@@ -52,8 +52,10 @@ struct i915_page_sizes {
  * @mr: The memory region of the object pointed to by the vma.
  * @ops: Pointer to the backend i915_vma_ops.
  * @private: Bind backend private info.
- * @start: Offset into the address space of bind range start.
- * @node_size: Size of the allocated range manager node.
+ * @start: Offset into the address space of bind range start. Note that
+ * this is after any padding that might have been allocated.
+ * @node_size: Size of the allocated range manager node with padding
+ * subtracted.
  * @vma_size: Bind size.
  * @page_sizes_gtt: Resulting page sizes from the bind operation.
  * @bound_flags: Flags indicating binding status.
@@ -174,8 +176,8 @@ static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
  * @mr: The memory region of the object the vma points to.
  * @ops: The backend ops.
  * @private: Bind backend private info.
- * @start: Offset into the address space of bind range start.
- * @node_size: Size of the allocated range manager node.
+ * @start: Offset into the address space of bind range start after padding.
+ * @node_size: Size of the allocated range manager node minus padding.
  * @size: Bind size.
  *
  * Initializes a vma resource allocated using i915_vma_resource_alloc().
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 0daa8669181dafbc269e6407f2784a000da8ec7c..6fe22b096bdd759439af4a8936d827bf222a1a55 100644
@@ -1017,8 +1017,8 @@ empty_request(struct intel_engine_cs *engine,
                return request;
 
        err = engine->emit_bb_start(request,
-                                   batch->node.start,
-                                   batch->node.size,
+                                   i915_vma_offset(batch),
+                                   i915_vma_size(batch),
                                    I915_DISPATCH_SECURE);
        if (err)
                goto out_request;
@@ -1138,14 +1138,14 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 
        if (ver >= 8) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-               *cmd++ = lower_32_bits(vma->node.start);
-               *cmd++ = upper_32_bits(vma->node.start);
+               *cmd++ = lower_32_bits(i915_vma_offset(vma));
+               *cmd++ = upper_32_bits(i915_vma_offset(vma));
        } else if (ver >= 6) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
-               *cmd++ = lower_32_bits(vma->node.start);
+               *cmd++ = lower_32_bits(i915_vma_offset(vma));
        } else {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-               *cmd++ = lower_32_bits(vma->node.start);
+               *cmd++ = lower_32_bits(i915_vma_offset(vma));
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
 
@@ -1227,8 +1227,8 @@ static int live_all_engines(void *arg)
                GEM_BUG_ON(err);
 
                err = engine->emit_bb_start(request[idx],
-                                           batch->node.start,
-                                           batch->node.size,
+                                           i915_vma_offset(batch),
+                                           i915_vma_size(batch),
                                            0);
                GEM_BUG_ON(err);
                request[idx]->batch = batch;
@@ -1354,8 +1354,8 @@ static int live_sequential_engines(void *arg)
                GEM_BUG_ON(err);
 
                err = engine->emit_bb_start(request[idx],
-                                           batch->node.start,
-                                           batch->node.size,
+                                           i915_vma_offset(batch),
+                                           i915_vma_size(batch),
                                            0);
                GEM_BUG_ON(err);
                request[idx]->batch = batch;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 16978ac59797851a4285bb69afd0c17940612082..618d9386d554941d0530494273fec50ceeb2fc66 100644
@@ -116,7 +116,7 @@ static unsigned int seqno_offset(u64 fence)
 static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
 {
-       return hws->node.start + seqno_offset(rq->fence.context);
+       return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
 }
 
 struct i915_request *
@@ -187,8 +187,8 @@ igt_spinner_create_request(struct igt_spinner *spin,
                *batch++ = MI_BATCH_BUFFER_START;
        else
                *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-       *batch++ = lower_32_bits(vma->node.start);
-       *batch++ = upper_32_bits(vma->node.start);
+       *batch++ = lower_32_bits(i915_vma_offset(vma));
+       *batch++ = upper_32_bits(i915_vma_offset(vma));
 
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */
 
@@ -203,7 +203,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
        flags = 0;
        if (GRAPHICS_VER(rq->engine->i915) <= 5)
                flags |= I915_DISPATCH_SECURE;
-       err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+       err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
 
 cancel_rq:
        if (err) {