mm/hugetlb: convert __update_and_free_page() to folios
author Sidhartha Kumar <sidhartha.kumar@oracle.com>
Fri, 13 Jan 2023 22:30:51 +0000 (16:30 -0600)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 13 Feb 2023 23:54:26 +0000 (15:54 -0800)
Convert __update_and_free_page() to __update_and_free_hugetlb_folio(): the
function now takes a folio, and its callers are updated to pass one in.
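
For illustration only (not part of the patch): a minimal sketch of the
page-to-folio conversion pattern applied here. The example_*() names are
hypothetical; page_folio() and &folio->page are the existing kernel helpers
used to move between the two types.

	#include <linux/mm.h>	/* struct page, struct folio, page_folio() */

	/* Before: the helper took a raw page and converted internally. */
	static void example_update_and_free(struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* ... operate on folio ... */
	}

	/* After: the caller converts once and passes the folio down.
	 * Interfaces that still expect a struct page are handed the
	 * head page via &folio->page. */
	static void example_update_and_free_folio(struct folio *folio)
	{
		/* e.g. __free_pages(&folio->page, order); */
	}

	/* Hypothetical call site showing the conversion moving to the caller. */
	static void example_caller(struct page *page)
	{
		example_update_and_free_folio(page_folio(page));
	}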

Link: https://lkml.kernel.org/r/20230113223057.173292-3-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index 0c1e1ce113c84ca98318a8c8c79d004b37552d0a..d27fcf7685480a6b88e4cc502ec0a9edc5d7eeb7 100644
@@ -1698,10 +1698,10 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
        enqueue_hugetlb_folio(h, folio);
 }
 
-static void __update_and_free_page(struct hstate *h, struct page *page)
+static void __update_and_free_hugetlb_folio(struct hstate *h,
+                                               struct folio *folio)
 {
        int i;
-       struct folio *folio = page_folio(page);
        struct page *subpage;
 
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1714,7 +1714,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
        if (folio_test_hugetlb_raw_hwp_unreliable(folio))
                return;
 
-       if (hugetlb_vmemmap_restore(h, page)) {
+       if (hugetlb_vmemmap_restore(h, &folio->page)) {
                spin_lock_irq(&hugetlb_lock);
                /*
                 * If we cannot allocate vmemmap pages, just refuse to free the
@@ -1750,7 +1750,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
                destroy_compound_gigantic_folio(folio, huge_page_order(h));
                free_gigantic_folio(folio, huge_page_order(h));
        } else {
-               __free_pages(page, huge_page_order(h));
+               __free_pages(&folio->page, huge_page_order(h));
        }
 }
 
@@ -1790,7 +1790,7 @@ static void free_hpage_workfn(struct work_struct *work)
                 */
                h = size_to_hstate(page_size(page));
 
-               __update_and_free_page(h, page);
+               __update_and_free_hugetlb_folio(h, page_folio(page));
 
                cond_resched();
        }
@@ -1807,7 +1807,7 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
                                 bool atomic)
 {
        if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
-               __update_and_free_page(h, &folio->page);
+               __update_and_free_hugetlb_folio(h, folio);
                return;
        }