mm: thp: batch-collapse PMD with set_ptes()
Author: Ryan Roberts <ryan.roberts@arm.com>
Thu, 15 Feb 2024 10:31:49 +0000 (10:31 +0000)
Committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 22 Feb 2024 23:27:17 +0000 (15:27 -0800)
Refactor __split_huge_pmd_locked() so that a present PMD can be collapsed
to PTEs in a single batch using set_ptes().

This should improve performance a little bit, but the real motivation is
to remove the need for the arm64 backend to have to fold the contpte
entries.  Instead, since the ptes are set as a batch, the contpte blocks
can be initially set up pre-folded (once the arm64 contpte support is
added in the next few patches).  This leads to noticeable performance
improvement during split.

Link: https://lkml.kernel.org/r/20240215103205.2607016-3-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Morse <james.morse@arm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 016e20bd813eaf1507c82b8ded2a572282eeecff..14888b15121e590238996067e77ddf0d2dac2a0b 100644 (file)
@@ -2579,15 +2579,16 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 
        pte = pte_offset_map(&_pmd, haddr);
        VM_BUG_ON(!pte);
-       for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
-               pte_t entry;
-               /*
-                * Note that NUMA hinting access restrictions are not
-                * transferred to avoid any possibility of altering
-                * permissions across VMAs.
-                */
-               if (freeze || pmd_migration) {
+
+       /*
+        * Note that NUMA hinting access restrictions are not transferred to
+        * avoid any possibility of altering permissions across VMAs.
+        */
+       if (freeze || pmd_migration) {
+               for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+                       pte_t entry;
                        swp_entry_t swp_entry;
+
                        if (write)
                                swp_entry = make_writable_migration_entry(
                                                        page_to_pfn(page + i));
@@ -2606,25 +2607,32 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                                entry = pte_swp_mksoft_dirty(entry);
                        if (uffd_wp)
                                entry = pte_swp_mkuffd_wp(entry);
-               } else {
-                       entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
-                       if (write)
-                               entry = pte_mkwrite(entry, vma);
-                       if (!young)
-                               entry = pte_mkold(entry);
-                       /* NOTE: this may set soft-dirty too on some archs */
-                       if (dirty)
-                               entry = pte_mkdirty(entry);
-                       if (soft_dirty)
-                               entry = pte_mksoft_dirty(entry);
-                       if (uffd_wp)
-                               entry = pte_mkuffd_wp(entry);
+
+                       VM_WARN_ON(!pte_none(ptep_get(pte + i)));
+                       set_pte_at(mm, addr, pte + i, entry);
                }
-               VM_BUG_ON(!pte_none(ptep_get(pte)));
-               set_pte_at(mm, addr, pte, entry);
-               pte++;
+       } else {
+               pte_t entry;
+
+               entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
+               if (write)
+                       entry = pte_mkwrite(entry, vma);
+               if (!young)
+                       entry = pte_mkold(entry);
+               /* NOTE: this may set soft-dirty too on some archs */
+               if (dirty)
+                       entry = pte_mkdirty(entry);
+               if (soft_dirty)
+                       entry = pte_mksoft_dirty(entry);
+               if (uffd_wp)
+                       entry = pte_mkuffd_wp(entry);
+
+               for (i = 0; i < HPAGE_PMD_NR; i++)
+                       VM_WARN_ON(!pte_none(ptep_get(pte + i)));
+
+               set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
        }
-       pte_unmap(pte - 1);
+       pte_unmap(pte);
 
        if (!pmd_migration)
                folio_remove_rmap_pmd(folio, page, vma);