habanalabs: don't free phys_pg_pack inside lock
author     Oded Gabbay <ogabbay@kernel.org>
           Sat, 15 Jan 2022 22:18:32 +0000 (00:18 +0200)
committer  Oded Gabbay <ogabbay@kernel.org>
           Mon, 28 Feb 2022 12:22:02 +0000 (14:22 +0200)
Freeing a phys_pg_pack object involves calling the device's memory scrubbing
functions, taking locks and possibly even triggering a reset.

This is not something that should be done while holding a device-wide
spinlock.

Therefore, save the relevant objects on a local linked list and, after
releasing the spinlock, traverse that list and free the phys_pg_pack
objects (a standalone sketch of this pattern follows the file list below).

Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/memory.c
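
The change amounts to the following two-phase teardown pattern. The sketch
below is illustrative only, not the driver code: struct item, item_teardown()
and the owner field are hypothetical stand-ins for hl_vm_phys_pg_pack,
hl_vm_ctx_fini() and the asid check, and a plain list stands in for the
driver's IDR of handles.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;	/* links the item onto a list */
	int owner;
};

static void item_teardown(struct list_head *live_list, spinlock_t *lock,
			  int owner)
{
	struct item *it, *tmp;
	LIST_HEAD(free_list);

	/* Phase 1: under the lock, only unlink and stash on a local list. */
	spin_lock(lock);
	list_for_each_entry_safe(it, tmp, live_list, node)
		if (it->owner == owner)
			list_move(&it->node, &free_list);
	spin_unlock(lock);

	/* Phase 2: the heavy freeing (scrubbing, possible reset) runs unlocked. */
	list_for_each_entry_safe(it, tmp, &free_list, node) {
		list_del(&it->node);
		kfree(it);	/* stands in for free_phys_pg_pack() */
	}
}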

diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 6c7a602104161d8f973d719c647e29325a265721..9c8374d8890706448856ead75d7820ecfbd0593e 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -1738,6 +1738,8 @@ struct hl_vm_hw_block_list_node {
  * @pages: the physical page array.
  * @npages: num physical pages in the pack.
  * @total_size: total size of all the pages in this list.
+ * @node: used to attach to deletion list that is used when all the allocations are cleared
+ *        at the teardown of the context.
  * @mapping_cnt: number of shared mappings.
  * @exporting_cnt: number of dma-buf exporting.
  * @asid: the context related to this list.
@@ -1753,6 +1755,7 @@ struct hl_vm_phys_pg_pack {
        u64                     *pages;
        u64                     npages;
        u64                     total_size;
+       struct list_head        node;
        atomic_t                mapping_cnt;
        u32                     exporting_cnt;
        u32                     asid;
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index c1eefaebacb64f64990338fd1cfad68265fdef99..4a5d3a179765f736ec6f76d5a764adb196f0a795 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -2607,11 +2607,12 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
  */
 void hl_vm_ctx_fini(struct hl_ctx *ctx)
 {
-       struct hl_device *hdev = ctx->hdev;
-       struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_list;
+       struct hl_device *hdev = ctx->hdev;
        struct hl_vm_hash_node *hnode;
+       struct hl_vm *vm = &hdev->vm;
        struct hlist_node *tmp_node;
+       struct list_head free_list;
        struct hl_mem_in args;
        int i;
 
@@ -2644,19 +2645,24 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 
        mutex_unlock(&ctx->mmu_lock);
 
+       INIT_LIST_HEAD(&free_list);
+
        spin_lock(&vm->idr_lock);
        idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
                if (phys_pg_list->asid == ctx->asid) {
                        dev_dbg(hdev->dev,
                                "page list 0x%px of asid %d is still alive\n",
                                phys_pg_list, ctx->asid);
-                       atomic64_sub(phys_pg_list->total_size,
-                                       &hdev->dram_used_mem);
-                       free_phys_pg_pack(hdev, phys_pg_list);
+
+                       atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
                        idr_remove(&vm->phys_pg_pack_handles, i);
+                       list_add(&phys_pg_list->node, &free_list);
                }
        spin_unlock(&vm->idr_lock);
 
+       list_for_each_entry(phys_pg_list, &free_list, node)
+               free_phys_pg_pack(hdev, phys_pg_list);
+
        va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
        va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
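
Note: if free_phys_pg_pack() frees the pack object itself, the final traversal
can also be written with the _safe iterator so that the next pointer is read
before the current entry is freed. A sketch of that variant, assuming an extra
local tmp_phys_node that is not present in the patch:

	struct hl_vm_phys_pg_pack *tmp_phys_node;

	list_for_each_entry_safe(phys_pg_list, tmp_phys_node, &free_list, node)
		free_phys_pg_pack(hdev, phys_pg_list);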