mm: convert ksm_might_need_to_copy() to work on folios
author:    Matthew Wilcox (Oracle) <willy@infradead.org>
           Mon, 11 Dec 2023 16:22:06 +0000 (16:22 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
           Fri, 29 Dec 2023 19:58:25 +0000 (11:58 -0800)
Patch series "Finish two folio conversions".

Most callers of page_add_new_anon_rmap() and
lru_cache_add_inactive_or_unevictable() have been converted to their folio
equivalents, but there are still a few stragglers.  There's a bit of
preparatory work in ksm and unuse_pte(), but after that it's pretty
mechanical.

This patch (of 9):

Accept a folio as an argument and return a folio result.  This removes a
call to compound_head() in do_swap_page() and prevents the folio and page
from getting out of sync in unuse_pte().

Reviewed-by: David Hildenbrand <david@redhat.com>
[willy@infradead.org: fix smatch warning]
Link: https://lkml.kernel.org/r/ZXnPtblC6A1IkyAB@casper.infradead.org
[david@redhat.com: only adjust the page if the folio changed]
Link: https://lkml.kernel.org/r/6a8f2110-fa91-4c10-9eae-88315309a6e3@redhat.com
Link: https://lkml.kernel.org/r/20231211162214.2146080-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20231211162214.2146080-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
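
The converted helper now has three possible results that both callers shown
below (do_swap_page() and unuse_pte()) must tell apart: a usable folio, NULL
when allocating the copy fails, and ERR_PTR(-EHWPOISON) when the source page
is hwpoisoned.  The userspace sketch below only illustrates that calling
convention; ERR_PTR() is a simplified stand-in for the macro in
include/linux/err.h and check_result() is a hypothetical helper, not kernel
code.

#include <errno.h>
#include <stdio.h>

#ifndef EHWPOISON
#define EHWPOISON 133	/* fallback if the libc headers do not define it */
#endif

/* Simplified stand-in for the kernel's ERR_PTR() from include/linux/err.h. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

struct folio { int unused; };	/* stand-in; only the pointer value matters */

/* Hypothetical helper mirroring the checks added in do_swap_page()/unuse_pte(). */
static void check_result(struct folio *folio)
{
	if (!folio)
		puts("NULL: allocation failed, report VM_FAULT_OOM / -ENOMEM");
	else if (folio == ERR_PTR(-EHWPOISON))
		puts("hwpoisoned source, report VM_FAULT_HWPOISON / set hwpoisoned");
	else
		puts("valid folio: continue with this (possibly new) folio");
}

int main(void)
{
	struct folio f = { 0 };

	check_result(&f);
	check_result(NULL);
	check_result(ERR_PTR(-EHWPOISON));
	return 0;
}
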
include/linux/ksm.h
mm/ksm.c
mm/memory.c
mm/swapfile.c

index 4643d5244e77cf4faaba1f256053021965e31676..401348e9f92b4e3b7de61391393d0e08b64cdfb2 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -76,7 +76,7 @@ static inline void ksm_exit(struct mm_struct *mm)
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
                        struct vm_area_struct *vma, unsigned long addr);
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
@@ -129,10 +129,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
        return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
                        struct vm_area_struct *vma, unsigned long addr)
 {
-       return page;
+       return folio;
 }
 
 static inline void rmap_walk_ksm(struct folio *folio,
index c0e1995fb4449a1803eb5228fbbf9a310497326b..e2ce850c27391d6dd48e159ae03940e5924ad0db 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2873,30 +2873,30 @@ void __ksm_exit(struct mm_struct *mm)
        trace_ksm_exit(mm);
 }
 
-struct page *ksm_might_need_to_copy(struct page *page,
+struct folio *ksm_might_need_to_copy(struct folio *folio,
                        struct vm_area_struct *vma, unsigned long addr)
 {
-       struct folio *folio = page_folio(page);
+       struct page *page = folio_page(folio, 0);
        struct anon_vma *anon_vma = folio_anon_vma(folio);
        struct folio *new_folio;
 
        if (folio_test_large(folio))
-               return page;
+               return folio;
 
        if (folio_test_ksm(folio)) {
                if (folio_stable_node(folio) &&
                    !(ksm_run & KSM_RUN_UNMERGE))
-                       return page;    /* no need to copy it */
+                       return folio;   /* no need to copy it */
        } else if (!anon_vma) {
-               return page;            /* no need to copy it */
+               return folio;           /* no need to copy it */
        } else if (folio->index == linear_page_index(vma, addr) &&
                        anon_vma->root == vma->anon_vma->root) {
-               return page;            /* still no need to copy it */
+               return folio;           /* still no need to copy it */
        }
        if (PageHWPoison(page))
                return ERR_PTR(-EHWPOISON);
        if (!folio_test_uptodate(folio))
-               return page;            /* let do_swap_page report the error */
+               return folio;           /* let do_swap_page report the error */
 
        new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
        if (new_folio &&
@@ -2905,9 +2905,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
                new_folio = NULL;
        }
        if (new_folio) {
-               if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) {
+               if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
+                                                               addr, vma)) {
                        folio_put(new_folio);
-                       memory_failure_queue(page_to_pfn(page), 0);
+                       memory_failure_queue(folio_pfn(folio), 0);
                        return ERR_PTR(-EHWPOISON);
                }
                folio_set_dirty(new_folio);
@@ -2918,7 +2919,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 #endif
        }
 
-       return new_folio ? &new_folio->page : NULL;
+       return new_folio;
 }
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
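
The mm/ksm.c hunk above shows the shape of the conversion: the function now
takes and returns a struct folio *, and the few places that still need a
struct page (the PageHWPoison() check and the copy) derive it with
folio_page(folio, 0), which is fine here because large folios are returned
unchanged near the top of the function.  The sketch below is only an
illustration of that pattern with simplified userspace stand-ins; struct
page, struct folio and folio_page0() are not the kernel definitions.

#include <stdbool.h>

struct page  { bool hwpoison; };
struct folio { struct page pages[1]; };	/* order-0 stand-in */

/* Models folio_page(folio, 0): the folio's first (head) page. */
static struct page *folio_page0(struct folio *folio)
{
	return &folio->pages[0];
}

/*
 * Before the patch the helper was "page in, page out"; after it, it is
 * "folio in, folio out", and a struct page is derived only where a
 * page-level check is genuinely needed.
 */
static struct folio *might_copy(struct folio *folio)
{
	struct page *page = folio_page0(folio);

	if (page->hwpoison)
		return NULL;	/* the real code returns ERR_PTR(-EHWPOISON) here */
	return folio;		/* callers keep working in terms of folios */
}

int main(void)
{
	struct folio f = { .pages = { { .hwpoison = false } } };

	return might_copy(&f) == &f ? 0 : 1;
}
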
index b9cc56a75f4b3ea415ec0c256a2f0c8fe4781e3d..7649cb9eb7f52dd6ad2dfb9a46e284c15da01808 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3942,15 +3942,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                 * page->index of !PageKSM() pages would be nonlinear inside the
                 * anon VMA -- PageKSM() is lost on actual swapout.
                 */
-               page = ksm_might_need_to_copy(page, vma, vmf->address);
-               if (unlikely(!page)) {
+               folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+               if (unlikely(!folio)) {
                        ret = VM_FAULT_OOM;
+                       folio = swapcache;
                        goto out_page;
-               } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+               } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
                        ret = VM_FAULT_HWPOISON;
+                       folio = swapcache;
                        goto out_page;
                }
-               folio = page_folio(page);
+               if (folio != swapcache)
+                       page = folio_page(folio, 0);
 
                /*
                 * If we want to map a page that's in the swapcache writable, we
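
In the do_swap_page() hunk above, the two new "folio = swapcache" assignments
point the local back at the original swapcache folio before jumping to the
shared out_page cleanup, so that path operates on the original folio rather
than on NULL or an error pointer.  page is re-derived only when
ksm_might_need_to_copy() actually handed back a different folio, matching the
"only adjust the page if the folio changed" note in the tags.  The userspace
sketch below only illustrates that restore-before-shared-cleanup pattern;
maybe_replace() and use_buffer() are hypothetical names, not kernel code.

#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: may return the original, a fresh copy, or NULL. */
static char *maybe_replace(char *orig)
{
	char *copy;

	if (strlen(orig) < 4)
		return orig;		/* nothing to do: hand back the original */
	copy = malloc(strlen(orig) + 1);
	if (!copy)
		return NULL;
	strcpy(copy, orig);
	return copy;
}

static int use_buffer(char *orig)
{
	char *buf = orig;
	int ret = 0;

	buf = maybe_replace(buf);
	if (!buf) {
		ret = -1;
		buf = orig;		/* restore before taking the shared exit */
		goto out;
	}
	/* ... work with buf, which may or may not still be orig ... */
out:
	if (buf != orig)		/* only release what was newly allocated */
		free(buf);
	return ret;
}

int main(void)
{
	char s[] = "example";

	return use_buffer(s) ? 1 : 0;
}
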
index 8be70912e298fc76a300f93d2ed4bb179e6921f5..0371b7b3cd2752a79300671ade2e3a81676dd533 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1749,11 +1749,13 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
        int ret = 1;
 
        swapcache = page;
-       page = ksm_might_need_to_copy(page, vma, addr);
-       if (unlikely(!page))
+       folio = ksm_might_need_to_copy(folio, vma, addr);
+       if (unlikely(!folio))
                return -ENOMEM;
-       else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+       else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
                hwpoisoned = true;
+       else
+               page = folio_file_page(folio, swp_offset(entry));
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
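
In the unuse_pte() hunk above, page is re-derived with
folio_file_page(folio, swp_offset(entry)): for a multi-page swapcache folio
this selects the subpage the swap offset refers to (for an order-0 folio it
is simply the head page).  The short sketch below only models the index
arithmetic involved, assuming the page count is a power of two; it is not
the pagemap.h implementation.

#include <stdio.h>

/*
 * Models the index arithmetic behind folio_file_page(): for a folio of
 * nr_pages (assumed to be a power of two), the low bits of the swap or
 * file index select the page within the folio.  Simplified stand-in,
 * not the kernel implementation.
 */
static unsigned long subpage_of(unsigned long index, unsigned long nr_pages)
{
	return index & (nr_pages - 1);
}

int main(void)
{
	/* e.g. swap offset 0x1236 landing in a 4-page (order-2) folio */
	printf("subpage %lu of 4\n", subpage_of(0x1236, 4));	/* prints 2 */
	return 0;
}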