mm: convert mm_counter() to take a folio
author Kefeng Wang <wangkefeng.wang@huawei.com>
Thu, 11 Jan 2024 15:24:28 +0000 (15:24 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 22 Feb 2024 00:00:03 +0000 (16:00 -0800)
Now that all callers of mm_counter() have a folio, convert mm_counter() to
take a folio.  This saves a call to compound_head() hidden inside PageAnon().
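
As a rough illustration (paraphrased from the page flag helpers of this
kernel generation; not part of the patch), PageAnon() must first resolve
the head page via page_folio(), whereas folio_test_anon() can test the
mapping bits directly:

    /* Approximate definitions, for illustration only. */
    static __always_inline bool folio_test_anon(const struct folio *folio)
    {
            /* A folio is already a head page; just test the mapping bits. */
            return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
    }

    static __always_inline bool PageAnon(const struct page *page)
    {
            /* page_folio() has to walk back to the head page first. */
            return folio_test_anon(page_folio(page));
    }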

Link: https://lkml.kernel.org/r/20240111152429.3374566-10-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/s390/mm/pgtable.c
include/linux/mm.h
mm/memory.c
mm/rmap.c
mm/userfaultfd.c

index 7e5dd4b176642c8fbb50adad8858f66420ffa024..b71432b15d665c7e496fa74fd182e06f7f215fbc 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -723,7 +723,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
        else if (is_migration_entry(entry)) {
                struct folio *folio = pfn_swap_entry_folio(entry);
 
-               dec_mm_counter(mm, mm_counter(&folio->page));
+               dec_mm_counter(mm, mm_counter(folio));
        }
        free_swap_and_cache(entry);
 }
index f5a97dec51694894a979dd1d045a6b982622e09c..22e597b36b388718d469ca24cef6e699b1653b1b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2603,11 +2603,11 @@ static inline int mm_counter_file(struct page *page)
        return MM_FILEPAGES;
 }
 
-static inline int mm_counter(struct page *page)
+static inline int mm_counter(struct folio *folio)
 {
-       if (PageAnon(page))
+       if (folio_test_anon(folio))
                return MM_ANONPAGES;
-       return mm_counter_file(page);
+       return mm_counter_file(&folio->page);
 }
 
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
index 4bed82009ea7e0b1554e580e3bf0a015dcf472df..87ef9809984728f89e1bdd76313b8f35d759ca1b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -808,7 +808,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        } else if (is_migration_entry(entry)) {
                folio = pfn_swap_entry_folio(entry);
 
-               rss[mm_counter(&folio->page)]++;
+               rss[mm_counter(folio)]++;
 
                if (!is_readable_migration_entry(entry) &&
                                is_cow_mapping(vm_flags)) {
@@ -840,7 +840,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                 * keep things as they are.
                 */
                folio_get(folio);
-               rss[mm_counter(page)]++;
+               rss[mm_counter(folio)]++;
                /* Cannot fail as these pages cannot get pinned. */
                folio_try_dup_anon_rmap_pte(folio, page, src_vma);
 
@@ -1476,7 +1476,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                if (pte_young(ptent) && likely(vma_has_recency(vma)))
                                        folio_mark_accessed(folio);
                        }
-                       rss[mm_counter(page)]--;
+                       rss[mm_counter(folio)]--;
                        if (!delay_rmap) {
                                folio_remove_rmap_pte(folio, page, vma);
                                if (unlikely(page_mapcount(page) < 0))
@@ -1504,7 +1504,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                         * see zap_install_uffd_wp_if_needed().
                         */
                        WARN_ON_ONCE(!vma_is_anonymous(vma));
-                       rss[mm_counter(page)]--;
+                       rss[mm_counter(folio)]--;
                        if (is_device_private_entry(entry))
                                folio_remove_rmap_pte(folio, page, vma);
                        folio_put(folio);
@@ -1519,7 +1519,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        folio = pfn_swap_entry_folio(entry);
                        if (!should_zap_folio(details, folio))
                                continue;
-                       rss[mm_counter(&folio->page)]--;
+                       rss[mm_counter(folio)]--;
                } else if (pte_marker_entry_uffd_wp(entry)) {
                        /*
                         * For anon: always drop the marker; for file: only
index f5d43edad529a76858a9aab5536a755f0a50ec67..4648cf1d8178b5b3e5e8edc66a4f2582dca0f1d9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1780,7 +1780,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                set_huge_pte_at(mm, address, pvmw.pte, pteval,
                                                hsz);
                        } else {
-                               dec_mm_counter(mm, mm_counter(&folio->page));
+                               dec_mm_counter(mm, mm_counter(folio));
                                set_pte_at(mm, address, pvmw.pte, pteval);
                        }
 
@@ -1795,7 +1795,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         * migration) will not expect userfaults on already
                         * copied pages.
                         */
-                       dec_mm_counter(mm, mm_counter(&folio->page));
+                       dec_mm_counter(mm, mm_counter(folio));
                } else if (folio_test_anon(folio)) {
                        swp_entry_t entry = page_swap_entry(subpage);
                        pte_t swp_pte;
@@ -2181,7 +2181,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                set_huge_pte_at(mm, address, pvmw.pte, pteval,
                                                hsz);
                        } else {
-                               dec_mm_counter(mm, mm_counter(&folio->page));
+                               dec_mm_counter(mm, mm_counter(folio));
                                set_pte_at(mm, address, pvmw.pte, pteval);
                        }
 
@@ -2196,7 +2196,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                         * migration) will not expect userfaults on already
                         * copied pages.
                         */
-                       dec_mm_counter(mm, mm_counter(&folio->page));
+                       dec_mm_counter(mm, mm_counter(folio));
                } else {
                        swp_entry_t entry;
                        pte_t swp_pte;
index 7cf7d43842590ccd99bf37795918a7054b61a8c4..ae80c37148290a15f70ae695af7974b24dd96f8c 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -124,7 +124,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
         * Must happen after rmap, as mm_counter() checks mapping (via
         * PageAnon()), which is set by __page_set_anon_rmap().
         */
-       inc_mm_counter(dst_mm, mm_counter(page));
+       inc_mm_counter(dst_mm, mm_counter(folio));
 
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
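
For completeness, a caller that still starts from a struct page would be
expected to look roughly like this after the conversion (illustrative
sketch, not part of the patch):

    struct folio *folio = page_folio(page);  /* single compound_head() lookup */

    rss[mm_counter(folio)]++;                /* folio can be reused for rmap, etc. */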