mm/hugetlb: convert isolate_hugetlb to folios
author    Sidhartha Kumar <sidhartha.kumar@oracle.com>
          Fri, 13 Jan 2023 22:30:50 +0000 (16:30 -0600)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 13 Feb 2023 23:54:26 +0000 (15:54 -0800)
Patch series "continue hugetlb folio conversion", v3.

This series continues the conversion of core hugetlb functions to use
folios.  It converts many helper functions in the hugetlb fault path, in
preparation for another series that converts the hugetlb fault code paths
themselves to operate on folios.

This patch (of 8):

Convert isolate_hugetlb() to take in a folio and convert its callers to
pass a folio.  Using page_folio() in the callers is safe because
isolate_hugetlb() only operates on head pages.
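
For example, a caller that currently holds a struct page can wrap it with
page_folio() at the call site (a minimal sketch modelled on the
mm/memory-failure.c caller below; the surrounding variables are only
illustrative):

	/* before: the (head) page was passed directly */
	if (PageHuge(page))
		isolated = !isolate_hugetlb(page, pagelist);

	/* after: pass the folio; page_folio() resolves any page to its
	 * head folio, so the isolation check is unchanged for hugetlb */
	if (PageHuge(page))
		isolated = !isolate_hugetlb(page_folio(page), pagelist);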

Link: https://lkml.kernel.org/r/20230113223057.173292-1-sidhartha.kumar@oracle.com
Link: https://lkml.kernel.org/r/20230113223057.173292-2-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/gup.c
mm/hugetlb.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a51e6daacac64359f8e65afb83d1ae3f3763c9a1..6e38a019f654b5218506e0b2b85ccc4ac6883a77 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -171,7 +171,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                vm_flags_t vm_flags);
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
-int isolate_hugetlb(struct page *page, struct list_head *list);
+int isolate_hugetlb(struct folio *folio, struct list_head *list);
 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                bool *migratable_cleared);
@@ -413,7 +413,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
        return NULL;
 }
 
-static inline int isolate_hugetlb(struct page *page, struct list_head *list)
+static inline int isolate_hugetlb(struct folio *folio, struct list_head *list)
 {
        return -EBUSY;
 }
diff --git a/mm/gup.c b/mm/gup.c
index 25e4a3d923d6e92c736057531b0eabd3f5fc6840..b0885f70579c5ac63a34d47c83ec3687e4f47345 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1930,7 +1930,7 @@ static unsigned long collect_longterm_unpinnable_pages(
                        continue;
 
                if (folio_test_hugetlb(folio)) {
-                       isolate_hugetlb(&folio->page, movable_page_list);
+                       isolate_hugetlb(folio, movable_page_list);
                        continue;
                }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ab35b1cc99277dfba2a0a38fabb795a39025d10e..0c1e1ce113c84ca98318a8c8c79d004b37552d0a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2925,7 +2925,7 @@ retry:
                 * Fail with -EBUSY if not possible.
                 */
                spin_unlock_irq(&hugetlb_lock);
-               ret = isolate_hugetlb(&old_folio->page, list);
+               ret = isolate_hugetlb(old_folio, list);
                spin_lock_irq(&hugetlb_lock);
                goto free_new;
        } else if (!folio_test_hugetlb_freed(old_folio)) {
@@ -3000,7 +3000,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
        if (hstate_is_gigantic(h))
                return -ENOMEM;
 
-       if (folio_ref_count(folio) && !isolate_hugetlb(&folio->page, list))
+       if (folio_ref_count(folio) && !isolate_hugetlb(folio, list))
                ret = 0;
        else if (!folio_ref_count(folio))
                ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
@@ -7250,19 +7250,19 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
  * These functions are overwritable if your architecture needs its own
  * behavior.
  */
-int isolate_hugetlb(struct page *page, struct list_head *list)
+int isolate_hugetlb(struct folio *folio, struct list_head *list)
 {
        int ret = 0;
 
        spin_lock_irq(&hugetlb_lock);
-       if (!PageHeadHuge(page) ||
-           !HPageMigratable(page) ||
-           !get_page_unless_zero(page)) {
+       if (!folio_test_hugetlb(folio) ||
+           !folio_test_hugetlb_migratable(folio) ||
+           !folio_try_get(folio)) {
                ret = -EBUSY;
                goto unlock;
        }
-       ClearHPageMigratable(page);
-       list_move_tail(&page->lru, list);
+       folio_clear_hugetlb_migratable(folio);
+       list_move_tail(&folio->lru, list);
 unlock:
        spin_unlock_irq(&hugetlb_lock);
        return ret;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b4b30d9b078272d0a80bc44613fc21d73c1e2a79..db85c2d37f70ac95eaae9dce0e492e5bc650b746 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2508,7 +2508,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
        bool isolated = false;
 
        if (PageHuge(page)) {
-               isolated = !isolate_hugetlb(page, pagelist);
+               isolated = !isolate_hugetlb(page_folio(page), pagelist);
        } else {
                bool lru = !__PageMovable(page);
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index fd40f7e9f17635135b0c6bc8046931772f888ec0..a1e8c3e9ab080893780661366899240f713adc83 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1641,7 +1641,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
                if (PageHuge(page)) {
                        pfn = page_to_pfn(head) + compound_nr(head) - 1;
-                       isolate_hugetlb(head, &source);
+                       isolate_hugetlb(folio, &source);
                        continue;
                } else if (PageTransHuge(page))
                        pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dd5ca942256f30c0e4d8a6c10766ce6dfea8bd55..fc034b07064584a82540e620279dad94b5d6db80 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -602,7 +602,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
        if (flags & (MPOL_MF_MOVE_ALL) ||
            (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
             !hugetlb_pmd_shared(pte))) {
-               if (isolate_hugetlb(page, qp->pagelist) &&
+               if (isolate_hugetlb(page_folio(page), qp->pagelist) &&
                        (flags & MPOL_MF_STRICT))
                        /*
                         * Failed to isolate page but allow migrating pages
diff --git a/mm/migrate.c b/mm/migrate.c
index 206fcdbe67f3d2b7b1b42ec1ceec136851f7e88b..f6464bce76780329ae7c7e9809fd809a4e484b96 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1773,7 +1773,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 
        if (PageHuge(page)) {
                if (PageHead(page)) {
-                       err = isolate_hugetlb(page, pagelist);
+                       err = isolate_hugetlb(page_folio(page), pagelist);
                        if (!err)
                                err = 1;
                }