        i915_gem_object_put(vm->scratch[1]);
 err_scratch0:
        i915_gem_object_put(vm->scratch[0]);
+       vm->scratch[0] = NULL;
        return ret;
 }
 
        gen6_ppgtt_free_pd(ppgtt);
        free_scratch(vm);
 
-       mutex_destroy(&ppgtt->flush);
+       if (ppgtt->base.pd)
+               free_pd(&ppgtt->base.vm, ppgtt->base.pd);
 
-       free_pd(&ppgtt->base.vm, ppgtt->base.pd);
+       mutex_destroy(&ppgtt->flush);
 }
 
 static void pd_vma_bind(struct i915_address_space *vm,
 
        err = gen6_ppgtt_init_scratch(ppgtt);
        if (err)
-               goto err_free;
+               goto err_put;
 
        ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
        if (IS_ERR(ppgtt->base.pd)) {
                err = PTR_ERR(ppgtt->base.pd);
-               goto err_scratch;
+               goto err_put;
        }
 
        return &ppgtt->base;
 
-err_scratch:
-       free_scratch(&ppgtt->base.vm);
-err_free:
-       kfree(ppgtt);
+err_put:
+       i915_vm_put(&ppgtt->base.vm);
        return ERR_PTR(err);
 }
 
        if (intel_vgpu_active(vm->i915))
                gen8_ppgtt_notify_vgt(ppgtt, false);
 
-       __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+       if (ppgtt->pd)
+               __gen8_ppgtt_cleanup(vm, ppgtt->pd,
+                                    gen8_pd_top_count(vm), vm->top);
+
        free_scratch(vm);
 }
 
                struct drm_i915_gem_object *obj;
 
                obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
-               if (IS_ERR(obj))
+               if (IS_ERR(obj)) {
+                       ret = PTR_ERR(obj);
                        goto free_scratch;
+               }
 
                ret = map_pt_dma(vm, obj);
                if (ret) {
 free_scratch:
        while (i--)
                i915_gem_object_put(vm->scratch[i]);
-       return -ENOMEM;
+       vm->scratch[0] = NULL;
+       return ret;
 }
 
 static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
                                     unsigned long lmem_pt_obj_flags)
 {
+       struct i915_page_directory *pd;
        struct i915_ppgtt *ppgtt;
        int err;
 
                ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
        }
 
-       err = gen8_init_scratch(&ppgtt->vm);
-       if (err)
-               goto err_free;
-
-       ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
-       if (IS_ERR(ppgtt->pd)) {
-               err = PTR_ERR(ppgtt->pd);
-               goto err_free_scratch;
-       }
-
-       if (!i915_vm_is_4lvl(&ppgtt->vm)) {
-               err = gen8_preallocate_top_level_pdp(ppgtt);
-               if (err)
-                       goto err_free_pd;
-       }
+       ppgtt->vm.pte_encode = gen8_pte_encode;
 
        ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
        ppgtt->vm.insert_entries = gen8_ppgtt_insert;
        ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
        ppgtt->vm.clear_range = gen8_ppgtt_clear;
        ppgtt->vm.foreach = gen8_ppgtt_foreach;
+       ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
 
-       ppgtt->vm.pte_encode = gen8_pte_encode;
+       err = gen8_init_scratch(&ppgtt->vm);
+       if (err)
+               goto err_put;
+
+       pd = gen8_alloc_top_pd(&ppgtt->vm);
+       if (IS_ERR(pd)) {
+               err = PTR_ERR(pd);
+               goto err_put;
+       }
+       ppgtt->pd = pd;
+
+       if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+               err = gen8_preallocate_top_level_pdp(ppgtt);
+               if (err)
+                       goto err_put;
+       }
 
        if (intel_vgpu_active(gt->i915))
                gen8_ppgtt_notify_vgt(ppgtt, true);
 
-       ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
        return ppgtt;
 
-err_free_pd:
-       __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
-                            gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
-err_free_scratch:
-       free_scratch(&ppgtt->vm);
-err_free:
-       kfree(ppgtt);
+err_put:
+       i915_vm_put(&ppgtt->vm);
        return ERR_PTR(err);
 }