mm: vmalloc: refactor vmalloc_dump_obj() function
Author:     Uladzislau Rezki (Sony) <urezki@gmail.com>
AuthorDate: Wed, 24 Jan 2024 18:09:20 +0000 (19:09 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Sat, 24 Feb 2024 01:48:21 +0000 (17:48 -0800)
Simplify the function in question by removing the extra on-stack "objp"
variable and by returning to an early-exit approach when spin_trylock()
fails or the VA is not found.

Link: https://lkml.kernel.org/r/20240124180920.50725-2-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 568f6c0b1fb5ef15485ed81eabc541e499a2d031..25a8df49725544e0320d69cd33b46918f100e4a0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4696,34 +4696,35 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #ifdef CONFIG_PRINTK
 bool vmalloc_dump_obj(void *object)
 {
-       void *objp = (void *)PAGE_ALIGN((unsigned long)object);
        const void *caller;
+       struct vm_struct *vm;
        struct vmap_area *va;
        struct vmap_node *vn;
        unsigned long addr;
        unsigned int nr_pages;
-       bool success = false;
-
-       vn = addr_to_node((unsigned long)objp);
 
-       if (spin_trylock(&vn->busy.lock)) {
-               va = __find_vmap_area((unsigned long)objp, &vn->busy.root);
+       addr = PAGE_ALIGN((unsigned long) object);
+       vn = addr_to_node(addr);
 
-               if (va && va->vm) {
-                       addr = (unsigned long)va->vm->addr;
-                       caller = va->vm->caller;
-                       nr_pages = va->vm->nr_pages;
-                       success = true;
-               }
+       if (!spin_trylock(&vn->busy.lock))
+               return false;
 
+       va = __find_vmap_area(addr, &vn->busy.root);
+       if (!va || !va->vm) {
                spin_unlock(&vn->busy.lock);
+               return false;
        }
 
-       if (success)
-               pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
-                       nr_pages, addr, caller);
+       vm = va->vm;
+       addr = (unsigned long) vm->addr;
+       caller = vm->caller;
+       nr_pages = vm->nr_pages;
+       spin_unlock(&vn->busy.lock);
 
-       return success;
+       pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
+               nr_pages, addr, caller);
+
+       return true;
 }
 #endif
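
For readability, this is roughly what vmalloc_dump_obj() looks like once the
hunk above is applied; it is a reconstruction from the diff for reference (the
comments are annotations, not part of the patch), not a verbatim copy of the
resulting tree:

#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
	const void *caller;
	struct vm_struct *vm;
	struct vmap_area *va;
	struct vmap_node *vn;
	unsigned long addr;
	unsigned int nr_pages;

	addr = PAGE_ALIGN((unsigned long) object);
	vn = addr_to_node(addr);

	/* Early exit if the node's busy lock cannot be taken. */
	if (!spin_trylock(&vn->busy.lock))
		return false;

	/* Early exit if no VA (or no vm_struct) covers the address. */
	va = __find_vmap_area(addr, &vn->busy.root);
	if (!va || !va->vm) {
		spin_unlock(&vn->busy.lock);
		return false;
	}

	/* Snapshot what is needed, then drop the lock before printing. */
	vm = va->vm;
	addr = (unsigned long) vm->addr;
	caller = vm->caller;
	nr_pages = vm->nr_pages;
	spin_unlock(&vn->busy.lock);

	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		nr_pages, addr, caller);

	return true;
}
#endif

Note that, as in the diff, every exit path releases vn->busy.lock before
returning, and the pr_cont() call is reached only after the lock has already
been dropped.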