}
 
        if (iova) {
-               int ret = msm_gem_get_iova_locked(bo, gpu->id, iova);
+               int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova);
 
                if (ret) {
                        drm_gem_object_unreference(bo);
 
        if (a5xx_gpu->pm4_bo) {
                if (a5xx_gpu->pm4_iova)
-                       msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
+                       msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
        }
 
        if (a5xx_gpu->pfp_bo) {
                if (a5xx_gpu->pfp_iova)
-                       msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
+                       msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
        }
 
        if (a5xx_gpu->gpmu_bo) {
                if (a5xx_gpu->gpmu_iova)
-                       msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+                       msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
        }
 
 
        if (IS_ERR(a5xx_gpu->gpmu_bo))
                goto err;
 
-       if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+       if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace,
+                       &a5xx_gpu->gpmu_iova))
                goto err;
 
        ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
 
 err:
        if (a5xx_gpu->gpmu_iova)
-               msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+               msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
        if (a5xx_gpu->gpmu_bo)
                drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
 
 
 
        DBG("%s", gpu->name);
 
-       ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+       ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
        if (ret) {
                gpu->rb_iova = 0;
                dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
                return -ENOMEM;
        }
 
-       ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+       ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
                        &adreno_gpu->memptrs_iova);
        if (ret) {
                dev_err(drm->dev, "could not map memptrs: %d\n", ret);
 
                        msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
 
                if (adreno_gpu->memptrs_iova)
-                       msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+                       msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
 
                drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
        }
 
                }
 
                ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
-                               priv->kms->id, &iova);
+                               priv->kms->aspace, &iova);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        pr_err("%s: failed to get iova, %d\n", __func__, ret);
 
        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
                ret = msm_gem_get_iova(msm_host->tx_gem_obj,
-                               priv->kms->id, &dma_base);
+                               priv->kms->aspace, &dma_base);
                if (ret) {
                        pr_err("%s: failed to get iova: %d\n", __func__, ret);
                        return ret;
 
        struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
        struct msm_kms *kms = &mdp4_kms->base.base;
 
-       msm_gem_put_iova(val, kms->id);
+       msm_gem_put_iova(val, kms->aspace);
        drm_gem_object_unreference_unlocked(val);
 }
 
                if (next_bo) {
                        /* take a obj ref + iova ref when we start scanning out: */
                        drm_gem_object_reference(next_bo);
-                       msm_gem_get_iova_locked(next_bo, kms->id, &iova);
+                       msm_gem_get_iova_locked(next_bo, kms->aspace, &iova);
 
                        /* enable cursor: */
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
        }
 
        if (cursor_bo) {
-               ret = msm_gem_get_iova(cursor_bo, kms->id, &iova);
+               ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
                if (ret)
                        goto fail;
        } else {
 
        struct msm_gem_address_space *aspace = kms->aspace;
 
        if (mdp4_kms->blank_cursor_iova)
-               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->id);
+               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
        drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
 
        if (aspace) {
 
                goto fail;
        }
 
-       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->id,
+       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
                        &mdp4_kms->blank_cursor_iova);
        if (ret) {
                dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
 
                return 0;
 
        DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
-       return msm_framebuffer_prepare(fb, kms->id);
+       return msm_framebuffer_prepare(fb, kms->aspace);
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
 
                return;
 
        DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
-       msm_framebuffer_cleanup(fb, kms->id);
+       msm_framebuffer_cleanup(fb, kms->aspace);
 }
 
 
                        MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 0));
+                       msm_framebuffer_iova(fb, kms->aspace, 0));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 1));
+                       msm_framebuffer_iova(fb, kms->aspace, 1));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 2));
+                       msm_framebuffer_iova(fb, kms->aspace, 2));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 3));
+                       msm_framebuffer_iova(fb, kms->aspace, 3));
 
        plane->fb = fb;
 }
 
        struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
        struct msm_kms *kms = &mdp5_kms->base.base;
 
-       msm_gem_put_iova(val, kms->id);
+       msm_gem_put_iova(val, kms->aspace);
        drm_gem_object_unreference_unlocked(val);
 }
 
        if (!cursor_bo)
                return -ENOENT;
 
-       ret = msm_gem_get_iova(cursor_bo, kms->id, &cursor_addr);
+       ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
        if (ret)
                return -EINVAL;
 
 
                return 0;
 
        DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
-       return msm_framebuffer_prepare(fb, kms->id);
+       return msm_framebuffer_prepare(fb, kms->aspace);
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
 
                return;
 
        DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
-       msm_framebuffer_cleanup(fb, kms->id);
+       msm_framebuffer_cleanup(fb, kms->aspace);
 }
 
 #define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
                        MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 0));
+                       msm_framebuffer_iova(fb, kms->aspace, 0));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 1));
+                       msm_framebuffer_iova(fb, kms->aspace, 1));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 2));
+                       msm_framebuffer_iova(fb, kms->aspace, 2));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
-                       msm_framebuffer_iova(fb, kms->id, 3));
+                       msm_framebuffer_iova(fb, kms->aspace, 3));
 }
 
 /* Note: mdp5_plane->pipe_lock must be locked */
 
        .atomic_state_free = msm_atomic_state_free,
 };
 
+#include "msm_gem.h"  /* temporary */
 int msm_register_address_space(struct drm_device *dev,
                struct msm_gem_address_space *aspace)
 {
 
        priv->aspace[priv->num_aspaces] = aspace;
 
-       return priv->num_aspaces++;
+       aspace->id = priv->num_aspaces++;
+
+       return aspace->id;
 }
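 
For context, a sketch of how a driver ends up with the aspace pointer that the
rest of this patch passes around (illustration only: example_gpu_init_aspace()
is a hypothetical wrapper, while msm_gem_address_space_create() is the
existing helper in msm_gem_vma.c):

        /* Hypothetical sketch: create an address space once at init time
         * and register it, so later callers can pass the pointer rather
         * than the returned id.
         */
        static int example_gpu_init_aspace(struct drm_device *dev,
                        struct msm_gpu *gpu, struct iommu_domain *iommu)
        {
                gpu->aspace = msm_gem_address_space_create(dev->dev,
                                iommu, "gpu");
                if (IS_ERR(gpu->aspace))
                        return PTR_ERR(gpu->aspace);

                /* still needed while aspace->id backs the legacy lookups */
                msm_register_address_space(dev, gpu->aspace);

                return 0;
        }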
 
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
        if (!priv->gpu)
                return -EINVAL;
 
-       return msm_gem_get_iova(obj, priv->gpu->id, iova);
+       return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
 }
 
 static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-               uint64_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova);
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt);
 
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
  * should be fine, since only the scanout (mdpN) side of things needs
  * this, the gpu doesn't care about fb's.
  */
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int ret, i, n = fb->format->num_planes;
        uint64_t iova;
 
        for (i = 0; i < n; i++) {
-               ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+               ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
                DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
                if (ret)
                        return ret;
 
        return 0;
 }
 
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int i, n = fb->format->num_planes;
 
        for (i = 0; i < n; i++)
-               msm_gem_put_iova(msm_fb->planes[i], id);
+               msm_gem_put_iova(msm_fb->planes[i], aspace);
 }
 
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace, int plane)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        if (!msm_fb->planes[plane])
                return 0;
-       return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+       return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
 }
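 
Taken together, the three helpers above are meant to be called in a
pin/use/unpin pattern from the KMS plane hooks, as the mdp4/mdp5 hunks earlier
in this patch do. A minimal sketch of that calling convention (the example_*
functions are hypothetical):

        /* Pin the fb's bo's on prepare, read the per-plane iova while
         * programming scanout, drop the references on cleanup.
         */
        static int example_prepare(struct msm_kms *kms,
                        struct drm_framebuffer *fb)
        {
                return msm_framebuffer_prepare(fb, kms->aspace);
        }

        static uint32_t example_scanout_addr(struct msm_kms *kms,
                        struct drm_framebuffer *fb, int plane)
        {
                /* only valid between prepare and cleanup: */
                return msm_framebuffer_iova(fb, kms->aspace, plane);
        }

        static void example_cleanup(struct msm_kms *kms,
                        struct drm_framebuffer *fb)
        {
                msm_framebuffer_cleanup(fb, kms->aspace);
        }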
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 
         * in panic (ie. lock-safe, etc) we could avoid pinning the
         * buffer now:
         */
-       ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->id, &paddr);
+       ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr);
        if (ret) {
                dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
                goto fail_unlock;
 
  * That means when I do eventually need to add support for unpinning
  * the refcnt counter needs to be atomic_t.
  */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-               uint64_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       int id = aspace ? aspace->id : 0;
        int ret = 0;
 
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 }
 
 /* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       int id = aspace ? aspace->id : 0;
        int ret;
 
        /* this is safe right now because we don't unmap until the
 
        }
 
        mutex_lock(&obj->dev->struct_mutex);
-       ret = msm_gem_get_iova_locked(obj, id, iova);
+       ret = msm_gem_get_iova_locked(obj, aspace, iova);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
 }
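 
As the comment above says, each successful get should be paired with a put; a
minimal usage sketch (example_pin_use_unpin() is hypothetical):

        static int example_pin_use_unpin(struct drm_gem_object *obj,
                        struct msm_gem_address_space *aspace)
        {
                uint64_t iova;
                int ret = msm_gem_get_iova(obj, aspace, &iova);

                if (ret)
                        return ret;

                /* ... hand iova to the hardware ... */

                msm_gem_put_iova(obj, aspace);
                return 0;
        }
 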
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       int id = aspace ? aspace->id : 0;
+
        WARN_ON(!msm_obj->domain[id].iova);
        return msm_obj->domain[id].iova;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
 {
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
 
        struct drm_mm mm;
        struct msm_mmu *mmu;
        struct kref kref;
+       int id;    /* temporary */
 };
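 
The temporary id field above is what lets the msm_gem.c helpers keep indexing
the existing per-object domain[] table during the transition; schematically
(illustration only, restating the "aspace ? aspace->id : 0" pattern from the
hunks above):

        /* a NULL aspace falls back to slot 0 */
        static uint64_t example_domain_iova(struct msm_gem_object *msm_obj,
                        struct msm_gem_address_space *aspace)
        {
                int id = aspace ? aspace->id : 0;

                return msm_obj->domain[id].iova;
        }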
 
 struct msm_gem_vma {
 
        struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
        if (submit->bos[i].flags & BO_PINNED)
-               msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+               msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
 
        if (submit->bos[i].flags & BO_LOCKED)
                ww_mutex_unlock(&msm_obj->resv->lock);
 
                /* if locking succeeded, pin bo: */
                ret = msm_gem_get_iova_locked(&msm_obj->base,
-                               submit->gpu->id, &iova);
+                               submit->gpu->aspace, &iova);
 
                if (ret)
                        break;
 
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                /* move to inactive: */
                msm_gem_move_to_inactive(&msm_obj->base);
-               msm_gem_put_iova(&msm_obj->base, gpu->id);
+               msm_gem_put_iova(&msm_obj->base, gpu->aspace);
                drm_gem_object_unreference(&msm_obj->base);
        }
 
                /* submit takes a reference to the bo and iova until retired: */
                drm_gem_object_reference(&msm_obj->base);
                msm_gem_get_iova_locked(&msm_obj->base,
-                               submit->gpu->id, &iova);
+                               submit->gpu->aspace, &iova);
 
                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
                        msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
 
        if (gpu->rb) {
                if (gpu->rb_iova)
-                       msm_gem_put_iova(gpu->rb->bo, gpu->id);
+                       msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
                msm_ringbuffer_destroy(gpu->rb);
        }