migrate: convert unmap_and_move_huge_page() to use folios
author Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 2 Sep 2022 19:46:47 +0000 (20:46 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 3 Oct 2022 21:02:54 +0000 (14:02 -0700)
Saves several calls to compound_head() and removes a couple of uses of
page->lru.
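
The shape of the conversion is the usual one for this series: resolve the
head page to a folio once, then use folio accessors that already know they
have a head page.  As an illustration only (hypothetical helper names, not
code from this patch):

	/* Old style: page_count() re-resolves the head page on every call. */
	static bool hpage_is_freed(struct page *hpage)
	{
		return page_count(hpage) == 1;
	}

	/* New style: compound_head() was paid once, in page_folio(). */
	static bool hfolio_is_freed(struct folio *src)
	{
		return folio_ref_count(src) == 1;
	}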

Link: https://lkml.kernel.org/r/20220902194653.1739778-52-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/migrate.c

index 1ea149f14f849e835f5a1265755390023d472ed5..c1c2d9d9032b9db3d30eecc6d2da7e0253d58ddc 100644 (file)
@@ -1263,7 +1263,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        if (!hugepage_migration_supported(page_hstate(hpage)))
                return -ENOSYS;
 
-       if (page_count(hpage) == 1) {
+       if (folio_ref_count(src) == 1) {
                /* page was freed from under us. So we are done. */
                putback_active_hugepage(hpage);
                return MIGRATEPAGE_SUCCESS;
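
This hunk shows where the compound_head() savings come from: in this era of
the tree, page_count() is itself a folio wrapper, roughly (paraphrased from
include/linux/page_ref.h, so treat the exact form as an approximation):

	static inline int page_count(const struct page *page)
	{
		/* page_folio() performs a compound_head() lookup each call. */
		return folio_ref_count(page_folio(page));
	}

Calling folio_ref_count(src) directly skips that repeated lookup.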
@@ -1274,7 +1274,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                return -ENOMEM;
        dst = page_folio(new_hpage);
 
-       if (!trylock_page(hpage)) {
+       if (!folio_trylock(src)) {
                if (!force)
                        goto out;
                switch (mode) {
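
folio_trylock() never sleeps, while folio_lock() may; the logic around this
hunk only falls through to the sleeping lock when force is set and the mode
is fully synchronous.  A condensed sketch of the pattern (hypothetical
helper, simplified from the code above):

	static bool lock_src_or_bail(struct folio *src, int force,
			enum migrate_mode mode)
	{
		if (folio_trylock(src))
			return true;
		if (!force)
			return false;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return false;	/* don't sleep in lighter modes */
		}
		folio_lock(src);	/* may sleep */
		return true;
	}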
@@ -1284,29 +1284,29 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                default:
                        goto out;
                }
-               lock_page(hpage);
+               folio_lock(src);
        }
 
        /*
         * Check for pages which are in the process of being freed.  Without
-        * page_mapping() set, hugetlbfs specific move page routine will not
+        * folio_mapping() set, hugetlbfs specific move page routine will not
         * be called and we could leak usage counts for subpools.
         */
-       if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
+       if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
                rc = -EBUSY;
                goto out_unlock;
        }
 
-       if (PageAnon(hpage))
-               anon_vma = page_get_anon_vma(hpage);
+       if (folio_test_anon(src))
+               anon_vma = page_get_anon_vma(&src->page);
 
-       if (unlikely(!trylock_page(new_hpage)))
+       if (unlikely(!folio_trylock(dst)))
                goto put_anon;
 
-       if (page_mapped(hpage)) {
+       if (folio_mapped(src)) {
                enum ttu_flags ttu = 0;
 
-               if (!PageAnon(hpage)) {
+               if (!folio_test_anon(src)) {
                        /*
                         * In shared mappings, try_to_unmap could potentially
                         * call huge_pmd_unshare.  Because of this, take
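
One conversion in this hunk deserves a footnote: page_get_anon_vma() has no
folio equivalent at this point in the series, so the code reaches back to
the underlying page explicitly via &src->page.  Roughly:

	/* Current idiom: only a page-based API exists; pass the head page. */
	if (folio_test_anon(src))
		anon_vma = page_get_anon_vma(&src->page);

	/* Assumed eventual form, once a folio helper exists: */
	anon_vma = folio_get_anon_vma(src);

The folio_get_anon_vma() spelling is an assumption about a later cleanup,
not something this patch introduces.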
@@ -1327,7 +1327,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                        i_mmap_unlock_write(mapping);
        }
 
-       if (!page_mapped(hpage))
+       if (!folio_mapped(src))
                rc = move_to_new_folio(dst, src, mode);
 
        if (page_was_mapped)
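
move_to_new_folio() was converted to folios earlier in this series, so once
src is unmapped, the migration call stays in folio terms on both sides.  Its
signature at this point is approximately:

	static int move_to_new_folio(struct folio *dst, struct folio *src,
			enum migrate_mode mode);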
@@ -1335,7 +1335,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                        rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
 
 unlock_put_anon:
-       unlock_page(new_hpage);
+       folio_unlock(dst);
 
 put_anon:
        if (anon_vma)
@@ -1347,12 +1347,12 @@ put_anon:
        }
 
 out_unlock:
-       unlock_page(hpage);
+       folio_unlock(src);
 out:
        if (rc == MIGRATEPAGE_SUCCESS)
                putback_active_hugepage(hpage);
        else if (rc != -EAGAIN)
-               list_move_tail(&hpage->lru, ret);
+               list_move_tail(&src->lru, ret);
 
        /*
         * If migration was not successful and there's a freeing callback, use