gen_pool_free(hdev->internal_cb_pool,
                                (uintptr_t)cb->kernel_address, cb->size);
        else
-               hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
-                               cb->kernel_address, cb->bus_address);
+               hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);
 
        kfree(cb);
 }
                cb->is_internal = true;
                cb->bus_address = hdev->internal_cb_va_base + cb_offset;
        } else if (ctx_id == HL_KERNEL_ASID_ID) {
-               p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
-                                               &cb->bus_address, GFP_ATOMIC);
+               p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
                if (!p)
-                       p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
-                                       cb_size, &cb->bus_address, GFP_KERNEL);
+                       p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
        } else {
-               p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
-                                               &cb->bus_address,
+               p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
                                                GFP_USER | __GFP_ZERO);
        }
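
The three branches above choose GFP flags by caller context: internal CBs come out of a
pre-mapped pool, kernel-ASID buffers try a non-blocking GFP_ATOMIC allocation and only fall
back to a sleeping GFP_KERNEL one when that fails under memory pressure, and user-visible
buffers use GFP_USER | __GFP_ZERO so stale kernel data never reaches userspace. The fallback
idiom in isolation (a condensed restatement of the hunk above, not new code):

        /* opportunistic non-sleeping attempt first, then a sleeping retry */
        p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
        if (!p)
                p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
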
 
 
 
 #define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
 
+enum dma_alloc_type {
+       DMA_ALLOC_COHERENT,
+       DMA_ALLOC_CPU_ACCESSIBLE,
+       DMA_ALLOC_POOL,
+};
+
 /*
  * hl_set_dram_bar - sets the bar to allow later access to address
  *
        return 0;
 }
 
+static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+               gfp_t flag, enum dma_alloc_type alloc_type)
+{
+       void *ptr = NULL;
+
+       switch (alloc_type) {
+       case DMA_ALLOC_COHERENT:
+               ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
+               break;
+       case DMA_ALLOC_CPU_ACCESSIBLE:
+               ptr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+               break;
+       case DMA_ALLOC_POOL:
+               ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
+               break;
+       }
+
+       return ptr;
+}
+
+static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
+                                       dma_addr_t dma_handle, enum dma_alloc_type alloc_type)
+{
+       switch (alloc_type) {
+       case DMA_ALLOC_COHERENT:
+               hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
+               break;
+       case DMA_ALLOC_CPU_ACCESSIBLE:
+               hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, cpu_addr);
+               break;
+       case DMA_ALLOC_POOL:
+               hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
+               break;
+       }
+}
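
Routing every allocation flavor through hl_dma_alloc_common() and hl_asic_dma_free_common()
creates a single choke point for cross-cutting concerns. As a sketch (the dev_dbg() call is
hypothetical instrumentation, not part of this patch), observing all three paths would now
take one line just before hl_dma_alloc_common() returns:

        /* hypothetical: a single debug print covers all three allocation paths */
        dev_dbg(hdev->dev, "dma alloc: va %p, size %zu, type %d\n", ptr, size, alloc_type);
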
+
+void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+                                       gfp_t flag)
+{
+       return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT);
+}
+
+void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
+                                       dma_addr_t dma_handle)
+{
+       hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT);
+}
+
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
+{
+       return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+}
+
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
+{
+       hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+}
+
+void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+                                       dma_addr_t *dma_handle)
+{
+       return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL);
+}
+
+void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr)
+{
+       hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL);
+}
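
With the wrappers defined, call sites collapse to the one-line pattern used throughout the
rest of this patch. A minimal sketch of the alloc/free pairing (hl_example_init() and the
SZ_4K size are illustrative only):

        static int hl_example_init(struct hl_device *hdev)
        {
                dma_addr_t bus_addr;
                void *va;

                va = hl_asic_dma_alloc_coherent(hdev, SZ_4K, &bus_addr,
                                                GFP_KERNEL | __GFP_ZERO);
                if (!va)
                        return -ENOMEM;

                /* ... hand bus_addr to the device, access va from the host ... */

                hl_asic_dma_free_coherent(hdev, SZ_4K, va, bus_addr);
                return 0;
        }
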
+
 int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
 
        u32 tmp, expected_ack_val, pi;
        int rc;
 
-       pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
-                                                               &pkt_dma_addr);
+       pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
        if (!pkt) {
                dev_err(hdev->dev,
                        "Failed to allocate DMA memory for packet to CPU\n");
 out:
        mutex_unlock(&hdev->send_cpu_message_lock);
 
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
+       hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
 
        return rc;
 }
        u64 result;
        int rc;
 
-       cpucp_info_cpu_addr =
-                       hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
-                                       sizeof(struct cpucp_info),
-                                       &cpucp_info_dma_addr);
+       cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
+                                                               &cpucp_info_dma_addr);
        if (!cpucp_info_cpu_addr) {
                dev_err(hdev->dev,
                        "Failed to allocate DMA memory for CPU-CP info packet\n");
                prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
 
 out:
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
-                       sizeof(struct cpucp_info), cpucp_info_cpu_addr);
+       hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);
 
        return rc;
 }
        u64 result;
        int rc;
 
-       eeprom_info_cpu_addr =
-                       hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
-                                       max_size, &eeprom_info_dma_addr);
+       eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
+                                                                       &eeprom_info_dma_addr);
        if (!eeprom_info_cpu_addr) {
                dev_err(hdev->dev,
                        "Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
        memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
 
 out:
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
-                       eeprom_info_cpu_addr);
+       hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
 
        return rc;
 }
        int i, rc;
 
        data_size = sizeof(struct cpucp_monitor_dump);
-       mon_dump_cpu_addr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, data_size,
-                                                                               &mon_dump_dma_addr);
+       mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
        if (!mon_dump_cpu_addr) {
                dev_err(hdev->dev,
                        "Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
        }
 
 out:
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
+       hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
 
        return rc;
 }
        u64 result;
        int rc;
 
-       cpucp_repl_rows_info_cpu_addr =
-                       hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
-                                       sizeof(struct cpucp_hbm_row_info),
-                                       &cpucp_repl_rows_info_dma_addr);
+       cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
+                                                       sizeof(struct cpucp_hbm_row_info),
+                                                       &cpucp_repl_rows_info_dma_addr);
        if (!cpucp_repl_rows_info_cpu_addr) {
                dev_err(hdev->dev,
                        "Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
        memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
 
 out:
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
-                                       sizeof(struct cpucp_hbm_row_info),
-                                       cpucp_repl_rows_info_cpu_addr);
+       hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
+                                               cpucp_repl_rows_info_cpu_addr);
 
        return rc;
 }
 
 }
 
 uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
+void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+                                       gfp_t flag);
+void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
+                                       dma_addr_t dma_handle);
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle);
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
+void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+                                       dma_addr_t *dma_handle);
+void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr);
 int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
 void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
                                enum dma_data_direction dir);
 
        int rc;
 
        if (is_cpu_queue)
-               p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
-                                                       HL_QUEUE_SIZE_IN_BYTES,
-                                                       &q->bus_address);
+               p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address);
        else
-               p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
-                                               HL_QUEUE_SIZE_IN_BYTES,
-                                               &q->bus_address,
+               p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
                                                GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;
 
 free_queue:
        if (is_cpu_queue)
-               hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
-                                       HL_QUEUE_SIZE_IN_BYTES,
-                                       q->kernel_address);
+               hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
        else
-               hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                                       HL_QUEUE_SIZE_IN_BYTES,
-                                       q->kernel_address,
-                                       q->bus_address);
+               hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
+                                               q->bus_address);
 
        return rc;
 }
 {
        void *p;
 
-       p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
-                                               HL_QUEUE_SIZE_IN_BYTES,
-                                               &q->bus_address,
-                                               GFP_KERNEL | __GFP_ZERO);
+       p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
+                                       GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;
 
        kfree(q->shadow_queue);
 
        if (q->queue_type == QUEUE_TYPE_CPU)
-               hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
-                                       HL_QUEUE_SIZE_IN_BYTES,
-                                       q->kernel_address);
+               hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
        else
-               hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                                       HL_QUEUE_SIZE_IN_BYTES,
-                                       q->kernel_address,
-                                       q->bus_address);
+               hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
+                                               q->bus_address);
 }
 
 int hl_hw_queues_create(struct hl_device *hdev)
 
 {
        void *p;
 
-       p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
-                               &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+       p = hl_asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, &q->bus_address,
+                                       GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;
 
  */
 void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
 {
-       hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
-                                                q->kernel_address,
-                                                q->bus_address);
+       hl_asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, q->kernel_address, q->bus_address);
 }
 
 void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
 {
        void *p;
 
-       p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
-                                                       HL_EQ_SIZE_IN_BYTES,
-                                                       &q->bus_address);
+       p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address);
        if (!p)
                return -ENOMEM;
 
 {
        flush_workqueue(hdev->eq_wq);
 
-       hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
-                                       HL_EQ_SIZE_IN_BYTES,
-                                       q->kernel_address);
+       hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address);
 }
 
 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
 
        }
 
        fw_size = fw->size;
-       cpu_addr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, fw_size,
-                       &dma_handle, GFP_KERNEL | __GFP_ZERO);
+       cpu_addr = hl_asic_dma_alloc_coherent(hdev, fw_size, &dma_handle, GFP_KERNEL | __GFP_ZERO);
        if (!cpu_addr) {
                dev_err(hdev->dev,
                        "Failed to allocate %zu of dma memory for TPC kernel\n",
 
        rc = _gaudi_init_tpc_mem(hdev, dma_handle, fw_size);
 
-       hdev->asic_funcs->asic_dma_free_coherent(hdev, fw->size, cpu_addr,
-                       dma_handle);
+       hl_asic_dma_free_coherent(hdev, fw->size, cpu_addr, dma_handle);
 
 out:
        release_firmware(fw);
         */
 
        for (i = 0 ; i < GAUDI_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
-               virt_addr_arr[i] =
-                       hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
-                                               HL_CPU_ACCESSIBLE_MEM_SIZE,
-                                               &dma_addr_arr[i],
-                                               GFP_KERNEL | __GFP_ZERO);
+               virt_addr_arr[i] = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
+                                                               &dma_addr_arr[i],
+                                                               GFP_KERNEL | __GFP_ZERO);
                if (!virt_addr_arr[i]) {
                        rc = -ENOMEM;
                        goto free_dma_mem_arr;
 
 free_dma_mem_arr:
        for (j = 0 ; j < i ; j++)
-               hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                                               HL_CPU_ACCESSIBLE_MEM_SIZE,
-                                               virt_addr_arr[j],
+               hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, virt_addr_arr[j],
                                                dma_addr_arr[j]);
 
        return rc;
                q = &gaudi->internal_qmans[i];
                if (!q->pq_kernel_addr)
                        continue;
-               hdev->asic_funcs->asic_dma_free_coherent(hdev, q->pq_size,
-                                                       q->pq_kernel_addr,
-                                                       q->pq_dma_addr);
+               hl_asic_dma_free_coherent(hdev, q->pq_size, q->pq_kernel_addr, q->pq_dma_addr);
        }
 }
 
                        goto free_internal_qmans_pq_mem;
                }
 
-               q->pq_kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
-                                               hdev, q->pq_size,
-                                               &q->pq_dma_addr,
-                                               GFP_KERNEL | __GFP_ZERO);
+               q->pq_kernel_addr = hl_asic_dma_alloc_coherent(hdev, q->pq_size, &q->pq_dma_addr,
+                                                               GFP_KERNEL | __GFP_ZERO);
                if (!q->pq_kernel_addr) {
                        rc = -ENOMEM;
                        goto free_internal_qmans_pq_mem;
        if (!hdev->asic_prop.fw_security_enabled)
                GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
                                        hdev->cpu_pci_msb_addr);
-       hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                       HL_CPU_ACCESSIBLE_MEM_SIZE,
-                       hdev->cpu_accessible_dma_mem,
-                       hdev->cpu_accessible_dma_address);
+       hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+                                       hdev->cpu_accessible_dma_address);
 free_dma_pool:
        dma_pool_destroy(hdev->dma_pool);
 free_gaudi_device:
                GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
                                        hdev->cpu_pci_msb_addr);
 
-       hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                       HL_CPU_ACCESSIBLE_MEM_SIZE,
-                       hdev->cpu_accessible_dma_mem,
-                       hdev->cpu_accessible_dma_address);
+       hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+                                       hdev->cpu_accessible_dma_address);
 
        dma_pool_destroy(hdev->dma_pool);
 
 
        fence_val = GAUDI_QMAN0_FENCE_VAL;
 
-       fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
-                                                       &fence_dma_addr);
+       fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
        if (!fence_ptr) {
                dev_err(hdev->dev,
                        "Failed to allocate memory for H/W queue %d testing\n",
 
        *fence_ptr = 0;
 
-       fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
-                                       sizeof(struct packet_msg_prot),
-                                       GFP_KERNEL, &pkt_dma_addr);
+       fence_pkt = hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_prot), GFP_KERNEL,
+                                               &pkt_dma_addr);
        if (!fence_pkt) {
                dev_err(hdev->dev,
                        "Failed to allocate packet for H/W queue %d testing\n",
        }
 
 free_pkt:
-       hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
-                                       pkt_dma_addr);
+       hl_asic_dma_pool_free(hdev, (void *) fence_pkt, pkt_dma_addr);
 free_fence_ptr:
-       hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
-                                       fence_dma_addr);
+       hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
        return rc;
 }
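
The Gaudi and Goya queue self-tests share one shape around hl_asic_dma_pool_zalloc(): a
4-byte fence word and a MSG_PROT packet are taken from the pool, the packet tells the queue
to write a magic value back into the fence, and the driver polls the fence before freeing
both buffers. A condensed sketch of the polling half (loop bounds hypothetical; the driver
uses its own polling helpers):

        /* wait up to ~1s for the device to write fence_val into the fence word */
        for (i = 0 ; i < 1000 ; i++) {
                if (READ_ONCE(*fence_ptr) == fence_val)
                        break;
                usleep_range(1000, 2000);
        }

        rc = (READ_ONCE(*fence_ptr) == fence_val) ? 0 : -ETIMEDOUT;
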
 
        bool is_eng_idle;
        int rc = 0, dma_id;
 
-       kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
-                                               hdev, SZ_2M,
-                                               &dma_addr,
-                                               GFP_KERNEL | __GFP_ZERO);
+       kernel_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &dma_addr, GFP_KERNEL | __GFP_ZERO);
 
        if (!kernel_addr)
                return -ENOMEM;
 out:
        hdev->asic_funcs->hw_queues_unlock(hdev);
 
-       hdev->asic_funcs->asic_dma_free_coherent(hdev, SZ_2M, kernel_addr,
-                                               dma_addr);
+       hl_asic_dma_free_coherent(hdev, SZ_2M, kernel_addr, dma_addr);
 
        return rc;
 }
                return -EBUSY;
        }
 
-       fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
-                                                       &fence_dma_addr);
+       fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
        if (!fence_ptr) {
                dev_err(hdev->dev,
                        "Failed to allocate fence memory for QMAN0\n");
 free_fence_ptr:
        WREG32(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT));
 
-       hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
-                                       fence_dma_addr);
+       hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
        return rc;
 }
 
        if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
                return 0;
 
-       hdev->internal_cb_pool_virt_addr =
-                       hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
-                                       HOST_SPACE_INTERNAL_CB_SZ,
-                                       &hdev->internal_cb_pool_dma_addr,
-                                       GFP_KERNEL | __GFP_ZERO);
+       hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev,
+                                                       HOST_SPACE_INTERNAL_CB_SZ,
+                                                       &hdev->internal_cb_pool_dma_addr,
+                                                       GFP_KERNEL | __GFP_ZERO);
 
        if (!hdev->internal_cb_pool_virt_addr)
                return -ENOMEM;
 destroy_internal_cb_pool:
        gen_pool_destroy(hdev->internal_cb_pool);
 free_internal_cb_pool:
-       hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                       HOST_SPACE_INTERNAL_CB_SZ,
-                       hdev->internal_cb_pool_virt_addr,
-                       hdev->internal_cb_pool_dma_addr);
+       hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
+                                       hdev->internal_cb_pool_dma_addr);
 
        return rc;
 }
 
        gen_pool_destroy(hdev->internal_cb_pool);
 
-       hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                       HOST_SPACE_INTERNAL_CB_SZ,
-                       hdev->internal_cb_pool_virt_addr,
-                       hdev->internal_cb_pool_dma_addr);
+       hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr,
+                                       hdev->internal_cb_pool_dma_addr);
 }
 
 static int gaudi_ctx_init(struct hl_ctx *ctx)
 
                goto free_goya_device;
        }
 
-       hdev->cpu_accessible_dma_mem =
-                       hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
-                                       HL_CPU_ACCESSIBLE_MEM_SIZE,
-                                       &hdev->cpu_accessible_dma_address,
-                                       GFP_KERNEL | __GFP_ZERO);
+       hdev->cpu_accessible_dma_mem = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
+                                                       &hdev->cpu_accessible_dma_address,
+                                                       GFP_KERNEL | __GFP_ZERO);
 
        if (!hdev->cpu_accessible_dma_mem) {
                rc = -ENOMEM;
 free_cpu_accessible_dma_pool:
        gen_pool_destroy(hdev->cpu_accessible_dma_pool);
 free_cpu_dma_mem:
-       hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                       HL_CPU_ACCESSIBLE_MEM_SIZE,
-                       hdev->cpu_accessible_dma_mem,
-                       hdev->cpu_accessible_dma_address);
+       hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+                                       hdev->cpu_accessible_dma_address);
 free_dma_pool:
        dma_pool_destroy(hdev->dma_pool);
 free_goya_device:
 
        gen_pool_destroy(hdev->cpu_accessible_dma_pool);
 
-       hdev->asic_funcs->asic_dma_free_coherent(hdev,
-                       HL_CPU_ACCESSIBLE_MEM_SIZE,
-                       hdev->cpu_accessible_dma_mem,
-                       hdev->cpu_accessible_dma_address);
+       hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
+                                       hdev->cpu_accessible_dma_address);
 
        dma_pool_destroy(hdev->dma_pool);
 
                return -EBUSY;
        }
 
-       fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
-                                                       &fence_dma_addr);
+       fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
        if (!fence_ptr) {
                dev_err(hdev->dev,
                        "Failed to allocate fence memory for QMAN0\n");
        }
 
 free_fence_ptr:
-       hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
-                                       fence_dma_addr);
+       hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
 
        goya_qman0_set_security(hdev, false);
 
 
        fence_val = GOYA_QMAN0_FENCE_VAL;
 
-       fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
-                                                       &fence_dma_addr);
+       fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
        if (!fence_ptr) {
                dev_err(hdev->dev,
                        "Failed to allocate memory for H/W queue %d testing\n",
 
        *fence_ptr = 0;
 
-       fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
-                                       sizeof(struct packet_msg_prot),
-                                       GFP_KERNEL, &pkt_dma_addr);
+       fence_pkt = hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_prot), GFP_KERNEL,
+                                               &pkt_dma_addr);
        if (!fence_pkt) {
                dev_err(hdev->dev,
                        "Failed to allocate packet for H/W queue %d testing\n",
        }
 
 free_pkt:
-       hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
-                                       pkt_dma_addr);
+       hl_asic_dma_pool_free(hdev, (void *) fence_pkt, pkt_dma_addr);
 free_fence_ptr:
-       hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
-                                       fence_dma_addr);
+       hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
        return rc;
 }