mm: convert unuse_pte() to use a folio throughout
author	Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 11 Dec 2023 16:22:08 +0000 (16:22 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:25 +0000 (11:58 -0800)
Saves about eight calls to compound_head().
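
A note for context (not part of the patch): the savings come from the fact that most page-based helpers are thin wrappers that re-derive the head page, i.e. call compound_head(), on every invocation, while the folio variants operate on the folio the caller already holds. The toy program below is a minimal, self-contained sketch of that pattern; all the sketch_* names and the two-field structs are hypothetical stand-ins, not kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins; the real structures live in include/linux/mm_types.h. */
	struct folio { bool uptodate; };
	struct page  { struct folio *head; };

	/* Models page_folio()/compound_head(): one extra lookup per call. */
	static struct folio *sketch_page_folio(struct page *page)
	{
		return page->head;
	}

	/* Models a page wrapper such as PageUptodate(): pays the lookup every time. */
	static bool sketch_page_uptodate(struct page *page)
	{
		return sketch_page_folio(page)->uptodate;
	}

	/* Models folio_test_uptodate(): the caller already resolved the folio. */
	static bool sketch_folio_uptodate(struct folio *folio)
	{
		return folio->uptodate;
	}

	int main(void)
	{
		struct folio f = { .uptodate = true };
		struct page p = { .head = &f };

		/* Old style: every helper repeats the head-page lookup. */
		printf("page:  %d\n", sketch_page_uptodate(&p));

		/* New style: resolve the folio once, then reuse it. */
		struct folio *folio = sketch_page_folio(&p);
		printf("folio: %d\n", sketch_folio_uptodate(folio));
		return 0;
	}

In the patched function this shows up as PageUptodate(), get_page(), PageWriteback(), unlock_page(), put_page() and the page_folio() inside the arch_swap_restore() call all turning into direct folio operations, which appears to be where the "about eight" figure comes from.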

Link: https://lkml.kernel.org/r/20231211162214.2146080-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/swapfile.c

index 85f9c355cb996201aa89b8099aed82bb8d4cf574..1501bc9564566faefe87cedd6d27df5ef6c26582 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1741,21 +1741,25 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, swp_entry_t entry, struct folio *folio)
 {
-       struct page *page = folio_file_page(folio, swp_offset(entry));
-       struct page *swapcache;
+       struct page *page;
+       struct folio *swapcache;
        spinlock_t *ptl;
        pte_t *pte, new_pte, old_pte;
-       bool hwpoisoned = PageHWPoison(page);
+       bool hwpoisoned = false;
        int ret = 1;
 
-       swapcache = page;
+       swapcache = folio;
        folio = ksm_might_need_to_copy(folio, vma, addr);
        if (unlikely(!folio))
                return -ENOMEM;
-       else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
+       else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
+               hwpoisoned = true;
+               folio = swapcache;
+       }
+
+       page = folio_file_page(folio, swp_offset(entry));
+       if (PageHWPoison(page))
                hwpoisoned = true;
-       else
-               page = folio_file_page(folio, swp_offset(entry));
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
@@ -1766,13 +1770,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 
        old_pte = ptep_get(pte);
 
-       if (unlikely(hwpoisoned || !PageUptodate(page))) {
+       if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
                swp_entry_t swp_entry;
 
                dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
                if (hwpoisoned) {
-                       swp_entry = make_hwpoison_entry(swapcache);
-                       page = swapcache;
+                       swp_entry = make_hwpoison_entry(page);
                } else {
                        swp_entry = make_poisoned_swp_entry();
                }
@@ -1786,27 +1789,27 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
         * when reading from swap. This metadata may be indexed by swap entry
         * so this must be called before swap_free().
         */
-       arch_swap_restore(entry, page_folio(page));
+       arch_swap_restore(entry, folio);
 
        dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-       get_page(page);
-       if (page == swapcache) {
+       folio_get(folio);
+       if (folio == swapcache) {
                rmap_t rmap_flags = RMAP_NONE;
 
                /*
-                * See do_swap_page(): PageWriteback() would be problematic.
-                * However, we do a wait_on_page_writeback() just before this
-                * call and have the page locked.
+                * See do_swap_page(): writeback would be problematic.
+                * However, we do a folio_wait_writeback() just before this
+                * call and have the folio locked.
                 */
-               VM_BUG_ON_PAGE(PageWriteback(page), page);
+               VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
                if (pte_swp_exclusive(old_pte))
                        rmap_flags |= RMAP_EXCLUSIVE;
 
                page_add_anon_rmap(page, vma, addr, rmap_flags);
        } else { /* ksm created a completely new copy */
-               page_add_new_anon_rmap(page, vma, addr);
-               lru_cache_add_inactive_or_unevictable(page, vma);
+               folio_add_new_anon_rmap(folio, vma, addr);
+               folio_add_lru_vma(folio, vma);
        }
        new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
        if (pte_swp_soft_dirty(old_pte))
@@ -1819,9 +1822,9 @@ setpte:
 out:
        if (pte)
                pte_unmap_unlock(pte, ptl);
-       if (page != swapcache) {
-               unlock_page(page);
-               put_page(page);
+       if (folio != swapcache) {
+               folio_unlock(folio);
+               folio_put(folio);
        }
        return ret;
 }
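
A reviewer-oriented note on the hwpoison path: dropping the old "page = swapcache;" assignment is safe because the ERR_PTR(-EHWPOISON) branch now resets folio back to swapcache before page is derived with folio_file_page(), so make_hwpoison_entry(page) still receives the precise page within the swapcache folio.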