hugetlb: move vm_fault declaration to the top of hugetlb_fault()
author     Vishal Moola (Oracle) <vishal.moola@gmail.com>
           Wed, 21 Feb 2024 23:47:29 +0000 (15:47 -0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 5 Mar 2024 01:01:15 +0000 (17:01 -0800)
hugetlb_fault() currently defines a vm_fault to pass to the generic
handle_userfault() function.  We can move this definition to the top of
hugetlb_fault() so that it can be used throughout the rest of the hugetlb
fault path.

This will help clean up a number of excess variables and function arguments
throughout the stack.  Also, since vm_fault already has space to store the
page offset, use that instead and get rid of idx.

Link: https://lkml.kernel.org/r/20240221234732.187629-3-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c53a41d07cd3a27f923180c04f76beb8dc6b0738..02673926b3bb071c666f7ebb00f16be3b4dd7923 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6378,13 +6378,25 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        vm_fault_t ret;
        u32 hash;
-       pgoff_t idx;
        struct folio *folio = NULL;
        struct folio *pagecache_folio = NULL;
        struct hstate *h = hstate_vma(vma);
        struct address_space *mapping;
        int need_wait_lock = 0;
        unsigned long haddr = address & huge_page_mask(h);
+       struct vm_fault vmf = {
+               .vma = vma,
+               .address = haddr,
+               .real_address = address,
+               .flags = flags,
+               .pgoff = vma_hugecache_offset(h, vma, haddr),
+               /* TODO: Track hugetlb faults using vm_fault */
+
+               /*
+                * Some fields may not be initialized; be careful, as it may
+                * be hard to debug if called functions make assumptions.
+                */
+       };
 
        /* TODO: Handle faults under the VMA lock */
        if (flags & FAULT_FLAG_VMA_LOCK) {
@@ -6398,8 +6410,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * the same page in the page cache.
         */
        mapping = vma->vm_file->f_mapping;
-       idx = vma_hugecache_offset(h, vma, haddr);
-       hash = hugetlb_fault_mutex_hash(mapping, idx);
+       hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
        mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
        /*
@@ -6433,8 +6444,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 * hugetlb_no_page will drop vma lock and hugetlb fault
                 * mutex internally, which makes us return immediately.
                 */
-               return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
-                                     entry, flags);
+
+               return hugetlb_no_page(mm, vma, mapping, vmf.pgoff, address,
+                                       ptep, entry, flags);
        }
 
        ret = 0;
@@ -6480,7 +6492,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Just decrements count, does not deallocate */
                vma_end_reservation(h, vma, haddr);
 
-               pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, idx);
+               pagecache_folio = filemap_lock_hugetlb_folio(h, mapping,
+                                                            vmf.pgoff);
                if (IS_ERR(pagecache_folio))
                        pagecache_folio = NULL;
        }
@@ -6495,13 +6508,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
            (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
                if (!userfaultfd_wp_async(vma)) {
-                       struct vm_fault vmf = {
-                               .vma = vma,
-                               .address = haddr,
-                               .real_address = address,
-                               .flags = flags,
-                       };
-
                        spin_unlock(ptl);
                        if (pagecache_folio) {
                                folio_unlock(pagecache_folio);
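
A few lines past the end of this hunk (not shown), the existing
return handle_userfault(&vmf, VM_UFFD_WP) call now picks up the
function-scope vmf instead of the deleted block-local one; this is
the handle_userfault() caller the commit message refers to.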