mm/hugetlb: increase use of folios in alloc_huge_page()
author	Sidhartha Kumar <sidhartha.kumar@oracle.com>
Fri, 13 Jan 2023 22:30:54 +0000 (16:30 -0600)
committer	Andrew Morton <akpm@linux-foundation.org>
Mon, 13 Feb 2023 23:54:27 +0000 (15:54 -0800)
Change hugetlb_cgroup_commit_charge{,_rsvd}(), dequeue_huge_page_vma() and
alloc_buddy_huge_page_with_mpol() to use folios so that alloc_huge_page() is
cleaned up by operating on folios until its return.

Link: https://lkml.kernel.org/r/20230113223057.173292-6-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
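
A minimal standalone sketch of the pattern this patch applies, using simplified
stand-in types and hypothetical toy_* helpers rather than the real kernel
definitions: internal helpers pass struct folio around, and the existing
page-based interface is preserved only by converting back with &folio->page at
the return.

#include <stddef.h>

/* Simplified stand-ins for the kernel types; a folio wraps its head page. */
struct page { int refcount; };
struct folio { struct page page; };

/* Hypothetical folio-based internal helper (cf. dequeue_hugetlb_folio_vma()). */
static struct folio *toy_dequeue_folio(struct folio *pool, int available)
{
	return available ? pool : NULL;
}

/* Page-returning external interface (cf. alloc_huge_page()). */
static struct page *toy_alloc_huge_page(struct folio *pool, int available)
{
	struct folio *folio = toy_dequeue_folio(pool, available);

	if (!folio)
		return NULL;

	/* All internal bookkeeping operates on the folio... */
	folio->page.refcount = 1;

	/* ...and only the return converts back for existing page-based callers. */
	return &folio->page;
}

int main(void)
{
	struct folio f = { { 0 } };

	return toy_alloc_huge_page(&f, 1) == &f.page ? 0 : 1;
}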
include/linux/hugetlb_cgroup.h
mm/hugetlb.c
mm/hugetlb_cgroup.c

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index f706626a80635f2df49fe1054e03477fcfd6046b..3d82d91f49acb04c7c123d9210c77eb6d3c7916c 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -141,10 +141,10 @@ extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
                                             struct hugetlb_cgroup **ptr);
 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                         struct hugetlb_cgroup *h_cg,
-                                        struct page *page);
+                                        struct folio *folio);
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                              struct hugetlb_cgroup *h_cg,
-                                             struct page *page);
+                                             struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
                                         struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
@@ -230,14 +230,14 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
 
 static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                                struct hugetlb_cgroup *h_cg,
-                                               struct page *page)
+                                               struct folio *folio)
 {
 }
 
 static inline void
 hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
-                                 struct page *page)
+                                 struct folio *folio)
 {
 }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fa61b4aa68cae7ff5ebb94b004bb2aae2eba0c11..5d0d1efbe590266f512953687edf9dce78761a9a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1348,7 +1348,7 @@ static unsigned long available_huge_pages(struct hstate *h)
        return h->free_huge_pages - h->resv_huge_pages;
 }
 
-static struct page *dequeue_huge_page_vma(struct hstate *h,
+static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve,
                                long chg)
@@ -1392,7 +1392,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        }
 
        mpol_cond_put(mpol);
-       return &folio->page;
+       return folio;
 
 err:
        return NULL;
@@ -2446,7 +2446,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
  * Use the VMA's mpolicy to allocate a huge page from the buddy.
  */
 static
-struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
                struct vm_area_struct *vma, unsigned long addr)
 {
        struct folio *folio = NULL;
@@ -2469,7 +2469,7 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
        if (!folio)
                folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
        mpol_cond_put(mpol);
-       return &folio->page;
+       return folio;
 }
 
 /* page migration callback function */
@@ -3018,7 +3018,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 {
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
-       struct page *page;
        struct folio *folio;
        long map_chg, map_commit;
        long gbl_chg;
@@ -3082,34 +3081,34 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
         * from the global free pool (global change).  gbl_chg == 0 indicates
         * a reservation exists for the allocation.
         */
-       page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
-       if (!page) {
+       folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
+       if (!folio) {
                spin_unlock_irq(&hugetlb_lock);
-               page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
-               if (!page)
+               folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+               if (!folio)
                        goto out_uncharge_cgroup;
                spin_lock_irq(&hugetlb_lock);
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-                       SetHPageRestoreReserve(page);
+                       folio_set_hugetlb_restore_reserve(folio);
                        h->resv_huge_pages--;
                }
-               list_add(&page->lru, &h->hugepage_activelist);
-               set_page_refcounted(page);
+               list_add(&folio->lru, &h->hugepage_activelist);
+               folio_ref_unfreeze(folio, 1);
                /* Fall through */
        }
-       folio = page_folio(page);
-       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+
+       hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
        /* If allocation is not consuming a reservation, also store the
         * hugetlb_cgroup pointer on the page.
         */
        if (deferred_reserve) {
                hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
-                                                 h_cg, page);
+                                                 h_cg, folio);
        }
 
        spin_unlock_irq(&hugetlb_lock);
 
-       hugetlb_set_page_subpool(page, spool);
+       hugetlb_set_folio_subpool(folio, spool);
 
        map_commit = vma_commit_reservation(h, vma, addr);
        if (unlikely(map_chg > map_commit)) {
@@ -3130,7 +3129,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
                        hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
                                        pages_per_huge_page(h), folio);
        }
-       return page;
+       return &folio->page;
 
 out_uncharge_cgroup:
        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index d9e4425d81ac46ebacde9a6e27f64f70f1734e25..dedd2edb076ec211c2078651335bcc56066b516d 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -331,19 +331,15 @@ static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
-                                 struct page *page)
+                                 struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
 }
 
 void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                       struct hugetlb_cgroup *h_cg,
-                                      struct page *page)
+                                      struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
 }