mm/hugetlb: convert free_huge_page to folios
author Sidhartha Kumar <sidhartha.kumar@oracle.com>
Tue, 1 Nov 2022 22:30:56 +0000 (15:30 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 30 Nov 2022 23:58:43 +0000 (15:58 -0800)
Use folios inside free_huge_page(); this is in preparation for converting
hugetlb_cgroup_uncharge_page() to take in a folio.
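
As a sketch of that direction (the folio-taking helper is introduced by a
later patch in this series, so the name and signature below are illustrative,
not this commit's interface), the conversion could start as a thin wrapper
over the existing page-based call:

	/*
	 * Illustrative sketch only: a folio-based wrapper around the
	 * current page-based uncharge helper.  The real folio interface
	 * is defined by a later patch in this series.
	 */
	static inline void hugetlb_cgroup_uncharge_folio(int idx,
							 unsigned long nr_pages,
							 struct folio *folio)
	{
		hugetlb_cgroup_uncharge_page(idx, nr_pages, &folio->page);
	}

free_huge_page() could then pass the folio it already computed, e.g.
hugetlb_cgroup_uncharge_folio(hstate_index(h), pages_per_huge_page(h), folio),
instead of the raw page.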

Link: https://lkml.kernel.org/r/20221101223059.460937-7-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
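
The diff below follows the usual page-to-folio recipe: resolve the compound
head once with page_folio(), then use folio accessors throughout, so the
compound_head() lookup hidden inside helpers like page_to_nid() and
page_hstate() is not repeated on every call.  A minimal sketch of the pattern
(the function example_put_path() is hypothetical, for illustration only):

	#include <linux/mm.h>
	#include <linux/hugetlb.h>

	static void example_put_path(struct page *page)
	{
		/* One compound_head() lookup... */
		struct folio *folio = page_folio(page);

		/*
		 * ...after which folio accessors are plain dereferences
		 * of the folio, with no per-call head-page resolution.
		 */
		pr_debug("hugetlb folio on node %d, hstate index %d\n",
			 folio_nid(folio),
			 hstate_index(folio_hstate(folio)));
	}
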
mm/hugetlb.c

index 666a771c9a3d2d94fefaf386144201a9777cb86b..9841fb0fcaf97a64e7d7e83e9bb7e33fe6c70701 100644
@@ -1704,21 +1704,22 @@ void free_huge_page(struct page *page)
         * Can't pass hstate in here because it is called from the
         * compound page destructor.
         */
-       struct hstate *h = page_hstate(page);
-       int nid = page_to_nid(page);
-       struct hugepage_subpool *spool = hugetlb_page_subpool(page);
+       struct folio *folio = page_folio(page);
+       struct hstate *h = folio_hstate(folio);
+       int nid = folio_nid(folio);
+       struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
        bool restore_reserve;
        unsigned long flags;
 
-       VM_BUG_ON_PAGE(page_count(page), page);
-       VM_BUG_ON_PAGE(page_mapcount(page), page);
+       VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
+       VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
 
-       hugetlb_set_page_subpool(page, NULL);
-       if (PageAnon(page))
-               __ClearPageAnonExclusive(page);
-       page->mapping = NULL;
-       restore_reserve = HPageRestoreReserve(page);
-       ClearHPageRestoreReserve(page);
+       hugetlb_set_folio_subpool(folio, NULL);
+       if (folio_test_anon(folio))
+               __ClearPageAnonExclusive(&folio->page);
+       folio->mapping = NULL;
+       restore_reserve = folio_test_hugetlb_restore_reserve(folio);
+       folio_clear_hugetlb_restore_reserve(folio);
 
        /*
         * If HPageRestoreReserve was set on page, page allocation consumed a
@@ -1740,7 +1741,7 @@ void free_huge_page(struct page *page)
        }
 
        spin_lock_irqsave(&hugetlb_lock, flags);
-       ClearHPageMigratable(page);
+       folio_clear_hugetlb_migratable(folio);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
@@ -1748,7 +1749,7 @@ void free_huge_page(struct page *page)
        if (restore_reserve)
                h->resv_huge_pages++;
 
-       if (HPageTemporary(page)) {
+       if (folio_test_hugetlb_temporary(folio)) {
                remove_hugetlb_page(h, page, false);
                spin_unlock_irqrestore(&hugetlb_lock, flags);
                update_and_free_page(h, page, true);