mm/hugetlb: convert putback_active_hugepage to take in a folio
author: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Wed, 25 Jan 2023 17:05:32 +0000 (09:05 -0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 13 Feb 2023 23:54:28 +0000 (15:54 -0800)
Convert putback_active_hugepage() to folio_putback_active_hugetlb(), this
removes one user of the Huge Page macros which take in a page.  The
callers in migrate.c are also cleaned up by being able to directly use the
src and dst folio variables.

Link: https://lkml.kernel.org/r/20230125170537.96973-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/migrate.c

index 2375c62c61a46e7115abfd69aa1cce76ed72234c..067906c5778e7b6dd7920994f543034d6879c50c 100644 (file)
@@ -175,7 +175,7 @@ int isolate_hugetlb(struct folio *folio, struct list_head *list);
 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                bool *migratable_cleared);
-void putback_active_hugepage(struct page *page);
+void folio_putback_active_hugetlb(struct folio *folio);
 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
@@ -429,7 +429,7 @@ static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
        return 0;
 }
 
-static inline void putback_active_hugepage(struct page *page)
+static inline void folio_putback_active_hugetlb(struct folio *folio)
 {
 }
 
index a0d486ed541181947fcef586c27e572f4bf2622e..fd1ce61b8f3f2988e252b4531e2d2c5749378ef3 100644 (file)
@@ -7300,13 +7300,13 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
        return ret;
 }
 
-void putback_active_hugepage(struct page *page)
+void folio_putback_active_hugetlb(struct folio *folio)
 {
        spin_lock_irq(&hugetlb_lock);
-       SetHPageMigratable(page);
-       list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
+       folio_set_hugetlb_migratable(folio);
+       list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
        spin_unlock_irq(&hugetlb_lock);
-       put_page(page);
+       folio_put(folio);
 }
 
 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
index 811e76c6fac1a8c04fd77bcff42bde714103e30b..c09872cf41b76e3388979a7c69d194a34885a84c 100644 (file)
@@ -151,7 +151,7 @@ void putback_movable_pages(struct list_head *l)
 
        list_for_each_entry_safe(page, page2, l, lru) {
                if (unlikely(PageHuge(page))) {
-                       putback_active_hugepage(page);
+                       folio_putback_active_hugetlb(page_folio(page));
                        continue;
                }
                list_del(&page->lru);
@@ -1298,7 +1298,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (folio_ref_count(src) == 1) {
                /* page was freed from under us. So we are done. */
-               putback_active_hugepage(hpage);
+               folio_putback_active_hugetlb(src);
                return MIGRATEPAGE_SUCCESS;
        }
 
@@ -1383,7 +1383,7 @@ out_unlock:
        folio_unlock(src);
 out:
        if (rc == MIGRATEPAGE_SUCCESS)
-               putback_active_hugepage(hpage);
+               folio_putback_active_hugetlb(src);
        else if (rc != -EAGAIN)
                list_move_tail(&src->lru, ret);
 
@@ -1395,7 +1395,7 @@ out:
        if (put_new_page)
                put_new_page(new_hpage, private);
        else
-               putback_active_hugepage(new_hpage);
+               folio_putback_active_hugetlb(dst);
 
        return rc;
 }