mm/migrate_device: page_remove_rmap() -> folio_remove_rmap_pte()
Author:     David Hildenbrand <david@redhat.com>
AuthorDate: Wed, 20 Dec 2023 22:44:53 +0000 (23:44 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 29 Dec 2023 19:58:54 +0000 (11:58 -0800)
Let's convert migrate_vma_collect_pmd().  While at it, perform more folio
conversion.
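
For reference, the core of the change is a mechanical switch from the
page-based reference and rmap helpers to their folio counterparts.  A
minimal sketch of the before/after pattern (illustration only, not part
of the patch):

        /* Before: pin and unmap via the page */
        get_page(page);
        ...
        page_remove_rmap(page, vma, false);
        put_page(page);

        /*
         * After: pin and unmap via the folio.  folio_remove_rmap_pte()
         * still takes the precise subpage so the correct PTE mapping is
         * accounted for when the folio is large.
         */
        folio = page_folio(page);
        folio_get(folio);
        ...
        folio_remove_rmap_pte(folio, page, vma);
        folio_put(folio);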

Link: https://lkml.kernel.org/r/20231220224504.646757-30-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 81193363f8cd597cf9854d7c3fbc7d108a4579d5..39b7754480c6790158381888c140229844cf1f57 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -107,6 +107,7 @@ again:
 
        for (; addr < end; addr += PAGE_SIZE, ptep++) {
                unsigned long mpfn = 0, pfn;
+               struct folio *folio;
                struct page *page;
                swp_entry_t entry;
                pte_t pte;
@@ -168,41 +169,43 @@ again:
                }
 
                /*
-                * By getting a reference on the page we pin it and that blocks
+                * By getting a reference on the folio we pin it and that blocks
                 * any kind of migration. Side effect is that it "freezes" the
                 * pte.
                 *
-                * We drop this reference after isolating the page from the lru
-                * for non device page (device page are not on the lru and thus
+                * We drop this reference after isolating the folio from the lru
+                * for non device folio (device folio are not on the lru and thus
                 * can't be dropped from it).
                 */
-               get_page(page);
+               folio = page_folio(page);
+               folio_get(folio);
 
                /*
-                * We rely on trylock_page() to avoid deadlock between
+                * We rely on folio_trylock() to avoid deadlock between
                 * concurrent migrations where each is waiting on the others
-                * page lock. If we can't immediately lock the page we fail this
+                * folio lock. If we can't immediately lock the folio we fail this
                 * migration as it is only best effort anyway.
                 *
-                * If we can lock the page it's safe to set up a migration entry
-                * now. In the common case where the page is mapped once in a
+                * If we can lock the folio it's safe to set up a migration entry
+                * now. In the common case where the folio is mapped once in a
                 * single process setting up the migration entry now is an
                 * optimisation to avoid walking the rmap later with
                 * try_to_migrate().
                 */
-               if (trylock_page(page)) {
+               if (folio_trylock(folio)) {
                        bool anon_exclusive;
                        pte_t swp_pte;
 
                        flush_cache_page(vma, addr, pte_pfn(pte));
-                       anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
+                       anon_exclusive = folio_test_anon(folio) &&
+                                         PageAnonExclusive(page);
                        if (anon_exclusive) {
                                pte = ptep_clear_flush(vma, addr, ptep);
 
                                if (page_try_share_anon_rmap(page)) {
                                        set_pte_at(mm, addr, ptep, pte);
-                                       unlock_page(page);
-                                       put_page(page);
+                                       folio_unlock(folio);
+                                       folio_put(folio);
                                        mpfn = 0;
                                        goto next;
                                }
@@ -214,7 +217,7 @@ again:
 
                        /* Set the dirty flag on the folio now the pte is gone. */
                        if (pte_dirty(pte))
-                               folio_mark_dirty(page_folio(page));
+                               folio_mark_dirty(folio);
 
                        /* Setup special migration page table entry */
                        if (mpfn & MIGRATE_PFN_WRITE)
@@ -248,16 +251,16 @@ again:
 
                        /*
                         * This is like regular unmap: we remove the rmap and
-                        * drop page refcount. Page won't be freed, as we took
-                        * a reference just above.
+                        * drop the folio refcount. The folio won't be freed, as
+                        * we took a reference just above.
                         */
-                       page_remove_rmap(page, vma, false);
-                       put_page(page);
+                       folio_remove_rmap_pte(folio, page, vma);
+                       folio_put(folio);
 
                        if (pte_present(pte))
                                unmapped++;
                } else {
-                       put_page(page);
+                       folio_put(folio);
                        mpfn = 0;
                }
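
A note on the one place the patch deliberately keeps page-level calls:
anon-exclusive state is tracked per subpage, so the exclusivity check
and the rmap-sharing attempt still take the page even though the
surrounding logic now operates on the folio.  A condensed sketch of
that split, mirroring the hunk above:

        /* Folio-level: is this anonymous memory at all? */
        if (folio_test_anon(folio) &&
            /* Page-level: exclusivity is per subpage of a large folio */
            PageAnonExclusive(page)) {
                pte = ptep_clear_flush(vma, addr, ptep);
                if (page_try_share_anon_rmap(page)) {
                        /* Cannot share the anon page: restore the pte
                         * and give up on migrating this entry. */
                        set_pte_at(mm, addr, ptep, pte);
                        folio_unlock(folio);
                        folio_put(folio);
                }
        }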