mm/ksm: page_add_anon_rmap() -> folio_add_anon_rmap_pte()
author: David Hildenbrand <david@redhat.com>
Wed, 20 Dec 2023 22:44:42 +0000 (23:44 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:51 +0000 (11:58 -0800)
Let's convert replace_page().  While at it, perform some folio conversion.

Link: https://lkml.kernel.org/r/20231220224504.646757-19-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/ksm.c

index 8fa6053a225d950db513058aba37df080e973e0b..146aa75fa6ff767907dd33e848896d115fe485c5 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1369,6 +1369,7 @@ out:
 static int replace_page(struct vm_area_struct *vma, struct page *page,
                        struct page *kpage, pte_t orig_pte)
 {
+       struct folio *kfolio = page_folio(kpage);
        struct mm_struct *mm = vma->vm_mm;
        struct folio *folio;
        pmd_t *pmd;
@@ -1408,15 +1409,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
                goto out_mn;
        }
        VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
-       VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage);
+       VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
+                       kfolio);
 
        /*
         * No need to check ksm_use_zero_pages here: we can only have a
         * zero_page here if ksm_use_zero_pages was enabled already.
         */
        if (!is_zero_pfn(page_to_pfn(kpage))) {
-               get_page(kpage);
-               page_add_anon_rmap(kpage, vma, addr, RMAP_NONE);
+               folio_get(kfolio);
+               folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
                newpte = mk_pte(kpage, vma->vm_page_prot);
        } else {
                /*