	mutex_init(&hdev->fd_open_cnt_lock);
	mutex_init(&hdev->send_cpu_message_lock);
+	mutex_init(&hdev->mmu_cache_lock);
	INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
	spin_lock_init(&hdev->hw_queues_mirror_lock);
	atomic_set(&hdev->in_reset, 0);
static void device_early_fini(struct hl_device *hdev)
{
+	mutex_destroy(&hdev->mmu_cache_lock);
	mutex_destroy(&hdev->send_cpu_message_lock);
	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
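
Taken together, the two hunks above tie the lock's lifetime to the device itself. A minimal sketch of the resulting paired paths, assuming the surrounding driver code is otherwise unchanged (bodies abbreviated; unrelated setup and error handling omitted):

/*
 * Sketch only: abbreviated from the hunks above. Relies on the kernel's
 * <linux/mutex.h>, <linux/spinlock.h>, <linux/list.h> and the driver's
 * struct hl_device; this is not the full driver code.
 */
static int device_early_init(struct hl_device *hdev)
{
	mutex_init(&hdev->fd_open_cnt_lock);
	mutex_init(&hdev->send_cpu_message_lock);
	mutex_init(&hdev->mmu_cache_lock);	/* moved here from hl_mmu_init() */
	INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
	spin_lock_init(&hdev->hw_queues_mirror_lock);
	atomic_set(&hdev->in_reset, 0);

	return 0;
}

static void device_early_fini(struct hl_device *hdev)
{
	mutex_destroy(&hdev->mmu_cache_lock);	/* reverse order of init */
	mutex_destroy(&hdev->send_cpu_message_lock);

	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
}
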
	/* MMU H/W init was already done in device hw_init() */
-	mutex_init(&hdev->mmu_cache_lock);
-
	hdev->mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
	if (!hdev->mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
-		rc = -ENOMEM;
-		goto err_pool_create;
+		return -ENOMEM;
	}
	rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
err_pool_add:
	gen_pool_destroy(hdev->mmu_pgt_pool);
-err_pool_create:
-	mutex_destroy(&hdev->mmu_cache_lock);
	return rc;
}
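
With mutex_init() gone from hl_mmu_init(), the err_pool_create label has no remaining work, so the earliest failure can return directly and only one unwind label survives. A sketch of the resulting error flow, with the gen_pool_add() arguments elided exactly as in the hunk above:

	hdev->mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
	if (!hdev->mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;	/* nothing allocated yet, no unwind needed */
	}

	rc = gen_pool_add(/* arguments elided, as in the hunk above */);
	if (rc)
		goto err_pool_add;

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_pgt_pool);	/* the only unwind step left */
	return rc;
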
	kvfree(hdev->mmu_shadow_hop0);
	gen_pool_destroy(hdev->mmu_pgt_pool);
-	mutex_destroy(&hdev->mmu_cache_lock);
	/* MMU H/W fini will be done in device hw_fini() */
}
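
The net effect is that mmu_cache_lock now lives exactly as long as the device object: presumably this keeps MMU teardown/re-init cycles (e.g. around a device reset) from destroying a mutex that other code may still reference, and it lets both hl_mmu_init()'s error path and hl_mmu_fini() drop their mutex_destroy() calls, as the hunks above show.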