 int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot);
 int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
 int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot);
-unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
-                                           enum kvm_pgtable_prot prot);
+int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
+                                 enum kvm_pgtable_prot prot,
+                                 unsigned long *haddr);
+int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
 
 static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
                                     unsigned long *start, unsigned long *end)
 
        DECLARE_REG(size_t, size, host_ctxt, 2);
        DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
 
-       cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot);
+       /*
+        * __pkvm_create_private_mapping() populates a pointer with the
+        * hypervisor start address of the allocation.
+        *
+        * However, the handle___pkvm_create_private_mapping() hypercall
+        * crosses the EL1/EL2 boundary, so the pointer would not be valid in
+        * this context.
+        *
+        * Instead, pass the allocation address as the return value (or return
+        * ERR_PTR() on failure).
+        */
+       unsigned long haddr;
+       int err = __pkvm_create_private_mapping(phys, size, prot, &haddr);
+
+       if (err)
+               haddr = (unsigned long)ERR_PTR(err);
+
+       cpu_reg(host_ctxt, 1) = haddr;
 }
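
Since the allocation address and any error code now share a single return
register, the EL1 caller has to disambiguate them itself. A minimal sketch of
such a caller, assuming the existing kvm_call_hyp_nvhe() wrapper and
IS_ERR_VALUE(); the helper name is hypothetical:

	static int pkvm_map_private_example(phys_addr_t phys, size_t size,
					    enum kvm_pgtable_prot prot,
					    unsigned long *haddr)
	{
		unsigned long addr;

		addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
					 phys, size, prot);
		/* Failures come back ERR_PTR()-encoded in the register. */
		if (IS_ERR_VALUE(addr))
			return addr;

		*haddr = addr;
		return 0;
	}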
 
 static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
 
        return err;
 }
 
-unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
-                                           enum kvm_pgtable_prot prot)
+/**
+ * pkvm_alloc_private_va_range - Allocates a private VA range.
+ * @size:      The size of the VA range to reserve.
+ * @haddr:     The hypervisor virtual start address of the allocation.
+ *
+ * The private virtual address (VA) range is allocated above __io_map_base
+ * and aligned based on the order of @size.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
 {
-       unsigned long addr;
-       int err;
+       unsigned long base, addr;
+       int ret = 0;
 
        hyp_spin_lock(&pkvm_pgd_lock);
 
-       size = PAGE_ALIGN(size + offset_in_page(phys));
-       addr = __io_map_base;
-       __io_map_base += size;
+       /* Align the allocation based on the order of its size */
+       addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));
 
-       /* Are we overflowing on the vmemmap ? */
-       if (__io_map_base > __hyp_vmemmap) {
-               __io_map_base -= size;
-               addr = (unsigned long)ERR_PTR(-ENOMEM);
-               goto out;
-       }
+       /* The allocated size is always a multiple of PAGE_SIZE */
+       base = addr + PAGE_ALIGN(size);
 
-       err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
-       if (err) {
-               addr = (unsigned long)ERR_PTR(err);
-               goto out;
+       /* Are we overflowing on the vmemmap ? */
+       if (!addr || base > __hyp_vmemmap) {
+               ret = -ENOMEM;
+       } else {
+               __io_map_base = base;
+               *haddr = addr;
        }
 
-       addr = addr + offset_in_page(phys);
-out:
        hyp_spin_unlock(&pkvm_pgd_lock);
 
-       return addr;
+       return ret;
+}
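
To make the alignment arithmetic concrete, a worked example assuming 4 KiB
pages and an illustrative starting __io_map_base:

	/*
	 * size = 0x3000 (three pages), so get_order(size) == 2 and the
	 * start address is rounded up to PAGE_SIZE << 2 == 0x4000:
	 *
	 *   addr = ALIGN(0x9000, 0x4000)       == 0xc000
	 *   base = 0xc000 + PAGE_ALIGN(0x3000) == 0xf000
	 *
	 * __io_map_base advances to 0xf000; the gap from 0x9000 to 0xc000
	 * is simply left unused.
	 */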
+
+int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
+                                 enum kvm_pgtable_prot prot,
+                                 unsigned long *haddr)
+{
+       unsigned long addr;
+       int err;
+
+       size = PAGE_ALIGN(size + offset_in_page(phys));
+       err = pkvm_alloc_private_va_range(size, &addr);
+       if (err)
+               return err;
+
+       err = __pkvm_create_mappings(addr, size, phys, prot);
+       if (err)
+               return err;
+
+       *haddr = addr + offset_in_page(phys);
+       return err;
 }
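
The offset_in_page() arithmetic lets callers pass a physical address that is
not page aligned; a worked example with illustrative values, again assuming
4 KiB pages:

	/*
	 * phys = 0x40001234, size = 0x100:
	 *
	 *   size   = PAGE_ALIGN(0x100 + offset_in_page(0x40001234))
	 *          = PAGE_ALIGN(0x334) == 0x1000
	 *   addr   = 0xc000 (say), mapping the page at phys 0x40001000
	 *            (the page-table code aligns phys down to a page)
	 *   *haddr = 0xc000 + 0x234, pointing at the requested byte
	 */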
 
 int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
 int hyp_map_vectors(void)
 {
        phys_addr_t phys;
-       void *bp_base;
+       unsigned long bp_base;
+       int ret;
 
        if (!kvm_system_needs_idmapped_vectors()) {
                __hyp_bp_vect_base = __bp_harden_hyp_vecs;
                return 0;
        }
 
        phys = __hyp_pa(__bp_harden_hyp_vecs);
-       bp_base = (void *)__pkvm_create_private_mapping(phys,
-                                                       __BP_HARDEN_HYP_VECS_SZ,
-                                                       PAGE_HYP_EXEC);
-       if (IS_ERR_OR_NULL(bp_base))
-               return PTR_ERR(bp_base);
+       ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
+                                           PAGE_HYP_EXEC, &bp_base);
+       if (ret)
+               return ret;
 
-       __hyp_bp_vect_base = bp_base;
+       __hyp_bp_vect_base = (void *)bp_base;
 
        return 0;
 }
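
pkvm_alloc_private_va_range() is also usable on its own to reserve VA space
without backing all of it, e.g. to leave an unmapped guard page below a
mapping. A sketch of that pattern (not part of this patch; phys is an
assumed input, and __pkvm_create_mappings() must be visible as it is in this
file):

	unsigned long va;
	int ret;

	/*
	 * Reserve two pages of VA but map only the upper one, leaving the
	 * lower page unmapped as a guard.
	 */
	ret = pkvm_alloc_private_va_range(2 * PAGE_SIZE, &va);
	if (!ret)
		ret = __pkvm_create_mappings(va + PAGE_SIZE, PAGE_SIZE,
					     phys, PAGE_HYP);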