mm/hugetlb: convert hugetlb_wp() to take in a folio
author	Sidhartha Kumar <sidhartha.kumar@oracle.com>
	Wed, 25 Jan 2023 17:05:36 +0000 (09:05 -0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Mon, 13 Feb 2023 23:54:29 +0000 (15:54 -0800)
Change the pagecache_page argument of hugetlb_wp() to pagecache_folio.
Replace the call to find_lock_page() with filemap_lock_folio().
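
The conversion follows the usual page-to-folio pattern: the locked-lookup
helper filemap_lock_folio() stands in for find_lock_page(),
folio_unlock()/folio_put() replace unlock_page()/put_page(), and a
struct page is compared against a folio via page_folio().  A minimal
sketch of the pattern (mapping, idx and page are placeholders here; as
of this change filemap_lock_folio() returns NULL on a cache miss, hence
the plain pointer check):

	struct folio *folio;

	folio = filemap_lock_folio(mapping, idx);
	if (folio) {
		/* the folio is locked and we hold a reference */
		if (page_folio(page) == folio)
			; /* page belongs to this folio */
		folio_unlock(folio);
		folio_put(folio);	/* drop the lookup reference */
	}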

Link: https://lkml.kernel.org/r/20230125170537.96973-8-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reported-by: gerald.schaefer@linux.ibm.com
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index de1f73e5e200d95c3461e0b83d3fbcd8a2210714..3a01a9dbf445a33f886aa2a23af60a1c97817cbe 100644
@@ -5472,7 +5472,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
                       unsigned long address, pte_t *ptep, unsigned int flags,
-                      struct page *pagecache_page, spinlock_t *ptl)
+                      struct folio *pagecache_folio, spinlock_t *ptl)
 {
        const bool unshare = flags & FAULT_FLAG_UNSHARE;
        pte_t pte;
@@ -5529,7 +5529,7 @@ retry_avoidcopy:
         * of the full address range.
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
-                       old_page != pagecache_page)
+                       page_folio(old_page) != pagecache_folio)
                outside_reserve = 1;
 
        get_page(old_page);
@@ -5922,7 +5922,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        hugetlb_count_add(pages_per_huge_page(h), mm);
        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
-               ret = hugetlb_wp(mm, vma, address, ptep, flags, &folio->page, ptl);
+               ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
        }
 
        spin_unlock(ptl);
@@ -5985,7 +5985,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        u32 hash;
        pgoff_t idx;
        struct page *page = NULL;
-       struct page *pagecache_page = NULL;
+       struct folio *pagecache_folio = NULL;
        struct hstate *h = hstate_vma(vma);
        struct address_space *mapping;
        int need_wait_lock = 0;
@@ -6067,7 +6067,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Just decrements count, does not deallocate */
                vma_end_reservation(h, vma, haddr);
 
-               pagecache_page = find_lock_page(mapping, idx);
+               pagecache_folio = filemap_lock_folio(mapping, idx);
        }
 
        ptl = huge_pte_lock(h, mm, ptep);
@@ -6087,9 +6087,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                };
 
                spin_unlock(ptl);
-               if (pagecache_page) {
-                       unlock_page(pagecache_page);
-                       put_page(pagecache_page);
+               if (pagecache_folio) {
+                       folio_unlock(pagecache_folio);
+                       folio_put(pagecache_folio);
                }
                hugetlb_vma_unlock_read(vma);
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
@@ -6098,11 +6098,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /*
         * hugetlb_wp() requires page locks of pte_page(entry) and
-        * pagecache_page, so here we need take the former one
-        * when page != pagecache_page or !pagecache_page.
+        * pagecache_folio, so here we need to take the former one
+        * when page != pagecache_folio or !pagecache_folio.
         */
        page = pte_page(entry);
-       if (page != pagecache_page)
+       if (page_folio(page) != pagecache_folio)
                if (!trylock_page(page)) {
                        need_wait_lock = 1;
                        goto out_ptl;
@@ -6113,7 +6113,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
                if (!huge_pte_write(entry)) {
                        ret = hugetlb_wp(mm, vma, address, ptep, flags,
-                                        pagecache_page, ptl);
+                                        pagecache_folio, ptl);
                        goto out_put_page;
                } else if (likely(flags & FAULT_FLAG_WRITE)) {
                        entry = huge_pte_mkdirty(entry);
@@ -6124,15 +6124,15 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                flags & FAULT_FLAG_WRITE))
                update_mmu_cache(vma, haddr, ptep);
 out_put_page:
-       if (page != pagecache_page)
+       if (page_folio(page) != pagecache_folio)
                unlock_page(page);
        put_page(page);
 out_ptl:
        spin_unlock(ptl);
 
-       if (pagecache_page) {
-               unlock_page(pagecache_page);
-               put_page(pagecache_page);
+       if (pagecache_folio) {
+               folio_unlock(pagecache_folio);
+               folio_put(pagecache_folio);
        }
 out_mutex:
        hugetlb_vma_unlock_read(vma);
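
The trylock_page() guard above reflects the locking rule hugetlb_wp()
relies on: both the faulting page and the pagecache folio must be held
locked, but they may turn out to be the same folio, in which case taking
the page lock a second time would deadlock.  A minimal illustration of
that invariant (lock_fault_page() is a hypothetical helper, simplified
to a sleeping lock_page(); the kernel instead uses trylock_page() and
retries to respect its lock ordering):

	/* pagecache_folio, if non-NULL, is already locked by the caller */
	static void lock_fault_page(struct page *page,
				    struct folio *pagecache_folio)
	{
		/* only lock the page when it sits in a different folio */
		if (page_folio(page) != pagecache_folio)
			lock_page(page);
	}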