 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 {
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       struct hl_vm_va_block *va_block, *tmp;
-       dma_addr_t bus_addr;
-       u64 virt_addr;
        u32 page_size = prop->pmmu.page_size;
-       s32 offset;
        int rc;
 
        if (!hdev->supports_cb_mapping) {
                return -EINVAL;
        }
 
-       INIT_LIST_HEAD(&cb->va_block_list);
-
-       for (bus_addr = cb->bus_address;
-                       bus_addr < cb->bus_address + cb->size;
-                       bus_addr += page_size) {
-
-               virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
-               if (!virt_addr) {
-                       dev_err(hdev->dev,
-                               "Failed to allocate device virtual address for CB\n");
-                       rc = -ENOMEM;
-                       goto err_va_pool_free;
-               }
+       if (cb->is_mmu_mapped)
+               return 0;
 
-               va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
-               if (!va_block) {
-                       rc = -ENOMEM;
-                       gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
-                       goto err_va_pool_free;
-               }
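+       /*
+        * The CB is mapped as a single contiguous block: round its size up
+        * to the PMMU page size and reserve a matching device VA range from
+        * the context's CB VA pool.
+        */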
+       cb->roundup_size = roundup(cb->size, page_size);
 
-               va_block->start = virt_addr;
-               va_block->end = virt_addr + page_size - 1;
-               va_block->size = page_size;
-               list_add_tail(&va_block->node, &cb->va_block_list);
+       cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
+       if (!cb->virtual_addr) {
+               dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
+               return -ENOMEM;
        }
 
        mutex_lock(&ctx->mmu_lock);
-
-       bus_addr = cb->bus_address;
-       offset = 0;
-       list_for_each_entry(va_block, &cb->va_block_list, node) {
-               rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
-                               va_block->size, list_is_last(&va_block->node,
-                                                       &cb->va_block_list));
-               if (rc) {
-                       dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
-                               va_block->start);
-                       goto err_va_umap;
-               }
-
-               bus_addr += va_block->size;
-               offset += va_block->size;
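+       /* Map the reserved VA range to the CB's DMA address in one shot. */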
+       rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
+               goto err_va_umap;
        }
-
        rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
-
        mutex_unlock(&ctx->mmu_lock);
 
        cb->is_mmu_mapped = true;
-
        return rc;
 
 err_va_umap:
-       list_for_each_entry(va_block, &cb->va_block_list, node) {
-               if (offset <= 0)
-                       break;
-               hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
-                               offset <= va_block->size);
-               offset -= va_block->size;
-       }
-
-       rc = hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
-
        mutex_unlock(&ctx->mmu_lock);
-
-err_va_pool_free:
-       list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
-               gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
-               list_del(&va_block->node);
-               kfree(va_block);
-       }
-
+       gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
        return rc;
 }
 
 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 {
        struct hl_device *hdev = ctx->hdev;
-       struct hl_vm_va_block *va_block, *tmp;
 
        mutex_lock(&ctx->mmu_lock);
-
-       list_for_each_entry(va_block, &cb->va_block_list, node)
-               if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
-                               list_is_last(&va_block->node,
-                                               &cb->va_block_list)))
-                       dev_warn_ratelimited(hdev->dev,
-                                       "Failed to unmap CB's va 0x%llx\n",
-                                       va_block->start);
-
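+       /* Tear down the CB's contiguous MMU mapping. */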
+       hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
        hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
-
        mutex_unlock(&ctx->mmu_lock);
 
-       list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
-               gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
-               list_del(&va_block->node);
-               kfree(va_block);
-       }
+       gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
 }
 
 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)

 static int hl_cb_info(struct hl_mem_mgr *mmg,
                        u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
 {
-       struct hl_vm_va_block *va_block;
        struct hl_cb *cb;
        int rc = 0;
 
        }
 
        if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
-               va_block = list_first_entry(&cb->va_block_list, struct hl_vm_va_block, node);
-               if (va_block) {
-                       *device_va = va_block->start;
+               if (cb->is_mmu_mapped) {
+                       *device_va = cb->virtual_addr;
                } else {
                        dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
                        rc = -EINVAL;