mm/memory: page_try_dup_anon_rmap() -> folio_try_dup_anon_rmap_pte()
author David Hildenbrand <david@redhat.com>
Wed, 20 Dec 2023 22:45:00 +0000 (23:45 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:56 +0000 (11:58 -0800)
Let's convert copy_nonpresent_pte().  While at it, perform some more folio
conversion.

Link: https://lkml.kernel.org/r/20231220224504.646757-37-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
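
As a minimal userspace model of what the device-private hunk below ends up doing -- folio_try_dup_anon_rmap_pte() can only fail when the folio may be DMA-pinned, and device-private pages can never be pinned, so the old BUG_ON() around the call is no longer needed -- consider the following sketch.  Every type and helper in it is a simplified stand-in, not the real kernel definition.

/* Simplified userspace model -- all types and helpers are stand-ins, not kernel code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct folio {
	int refcount;
	int mapcount;
	bool maybe_dma_pinned;	/* device-private folios can never be pinned */
};

/* Stand-in for folio_try_dup_anon_rmap_pte(): fails only if the folio may be pinned. */
static int try_dup_anon_rmap_pte(struct folio *folio)
{
	if (folio->maybe_dma_pinned)
		return -EBUSY;
	folio->mapcount++;
	return 0;
}

int main(void)
{
	struct folio device_private = { .refcount = 1, .maybe_dma_pinned = false };

	/*
	 * Mirrors the hunk: take a folio reference, then duplicate the anon
	 * rmap.  No BUG_ON() needed -- the call cannot fail for
	 * device-private folios, as they cannot get pinned.
	 */
	device_private.refcount++;		/* folio_get() */
	try_dup_anon_rmap_pte(&device_private);	/* cannot fail here */

	printf("refcount=%d mapcount=%d\n",
	       device_private.refcount, device_private.mapcount);
	return 0;
}
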
mm/memory.c

index fdc87bf1554511d03c2765bfda8b263baebc1d30..d66559cd55db7cdb16f2f1393fde67dc1bed63d8 100644 (file)
@@ -785,6 +785,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        unsigned long vm_flags = dst_vma->vm_flags;
        pte_t orig_pte = ptep_get(src_pte);
        pte_t pte = orig_pte;
+       struct folio *folio;
        struct page *page;
        swp_entry_t entry = pte_to_swp_entry(orig_pte);
 
@@ -829,6 +830,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                }
        } else if (is_device_private_entry(entry)) {
                page = pfn_swap_entry_to_page(entry);
+               folio = page_folio(page);
 
                /*
                 * Update rss count even for unaddressable pages, as
@@ -839,10 +841,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                 * for unaddressable pages, at some point. But for now
                 * keep things as they are.
                 */
-               get_page(page);
+               folio_get(folio);
                rss[mm_counter(page)]++;
                /* Cannot fail as these pages cannot get pinned. */
-               BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
+               folio_try_dup_anon_rmap_pte(folio, page, src_vma);
 
                /*
                 * We do not preserve soft-dirty information, because so
@@ -956,7 +958,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                 * future.
                 */
                folio_get(folio);
-               if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
+               if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
                        /* Page may be pinned, we have to copy. */
                        folio_put(folio);
                        return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
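
And a similar stand-alone model of the copy_present_pte() hunk above: take a folio reference, try to duplicate the anon rmap, and when that fails because the folio may be pinned, drop the reference and copy the page for the child instead.  Again, every type and helper here is a simplified stand-in for illustration only.

/* Simplified userspace model of the pinned-folio fallback -- stand-in types only. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct folio {
	int refcount;
	int mapcount;
	bool maybe_dma_pinned;
	char data[PAGE_SIZE];
};

/* Stand-in for folio_try_dup_anon_rmap_pte(): fails only if the folio may be pinned. */
static int try_dup_anon_rmap_pte(struct folio *folio)
{
	if (folio->maybe_dma_pinned)
		return -EBUSY;	/* caller must copy instead of sharing */
	folio->mapcount++;
	return 0;
}

/* Stand-in for the copy_present_page() fallback: give the child its own copy. */
static void copy_page_fallback(struct folio *dst, const struct folio *src)
{
	memcpy(dst->data, src->data, PAGE_SIZE);
	dst->refcount = 1;
	dst->mapcount = 1;
}

int main(void)
{
	struct folio parent = { .refcount = 1, .mapcount = 1, .maybe_dma_pinned = true };
	struct folio child = { 0 };

	parent.refcount++;			/* folio_get() */
	if (try_dup_anon_rmap_pte(&parent)) {
		/* Page may be pinned, we have to copy. */
		parent.refcount--;		/* folio_put() */
		copy_page_fallback(&child, &parent);
	}

	printf("parent refcount=%d, child mapcount=%d\n",
	       parent.refcount, child.mapcount);
	return 0;
}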