mm/vmalloc.c: optimize to reduce arguments of alloc_vmap_area()
author: Baoquan He <bhe@redhat.com>
Sat, 9 Mar 2024 04:44:54 +0000 (12:44 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:08 +0000 (20:56 -0700)
When alloc_vmap_area() is called from __get_vm_area_node(), open code the
field assignments of 'struct vm_struct *vm' and move the vm->flags and
vm->caller assignments into __get_vm_area_node(), so that the passed-in
arguments 'flags' and 'caller' can be removed.

This alleviates the overloading of the arguments passed to alloc_vmap_area().

Link: https://lkml.kernel.org/r/20240309044454.648888-1-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index e30acfb2cd8ba2e8782b262b7b24ef37cdcd5c9b..6856fad1a8f8c5e2e80e6c8088ad03ea6745d36d 100644 (file)
@@ -1944,8 +1944,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
                                unsigned long align,
                                unsigned long vstart, unsigned long vend,
                                int node, gfp_t gfp_mask,
-                               unsigned long va_flags, struct vm_struct *vm,
-                               unsigned long flags, const void *caller)
+                               unsigned long va_flags, struct vm_struct *vm)
 {
        struct vmap_node *vn;
        struct vmap_area *va;
@@ -2008,8 +2007,11 @@ retry:
        va->vm = NULL;
        va->flags = (va_flags | vn_id);
 
-       if (vm)
-               setup_vmalloc_vm(vm, va, flags, caller);
+       if (vm) {
+               vm->addr = (void *)va->va_start;
+               vm->size = va->va_end - va->va_start;
+               va->vm = vm;
+       }
 
        vn = addr_to_node(va->va_start);
 
@@ -2588,8 +2590,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
        va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
                                        VMALLOC_START, VMALLOC_END,
                                        node, gfp_mask,
-                                       VMAP_RAM|VMAP_BLOCK, NULL,
-                                       0, NULL);
+                                       VMAP_RAM|VMAP_BLOCK, NULL);
        if (IS_ERR(va)) {
                kfree(vb);
                return ERR_CAST(va);
@@ -2947,7 +2948,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
                va = alloc_vmap_area(size, PAGE_SIZE,
                                VMALLOC_START, VMALLOC_END,
                                node, GFP_KERNEL, VMAP_RAM,
-                               NULL, 0, NULL);
+                               NULL);
                if (IS_ERR(va))
                        return NULL;
 
@@ -3086,7 +3087,10 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
        if (!(flags & VM_NO_GUARD))
                size += PAGE_SIZE;
 
-       va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area, flags, caller);
+       area->flags = flags;
+       area->caller = caller;
+
+       va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
        if (IS_ERR(va)) {
                kfree(area);
                return NULL;