mm: ksm: use more folio api in ksm_might_need_to_copy()
authorKefeng Wang <wangkefeng.wang@huawei.com>
Sat, 18 Nov 2023 02:32:28 +0000 (10:32 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 12 Dec 2023 18:57:05 +0000 (10:57 -0800)
Patch series "mm: cleanup and use more folio in page fault", v3.

Rename page_copy_prealloc() to folio_prealloc(), which will be used by more
functions; also do more folio conversion in the page fault path.

This patch (of 5):

Since KSM only supports normal pages, and there is no swap-out/in for KSM
large folios either, add a large folio check in ksm_might_need_to_copy();
also convert page->index to folio->index, as page->index is going away.

Then convert ksm_might_need_to_copy() to use more of the folio API, saving
nine compound_head() calls, and shorten 'address' to 'addr' to reduce the
maximum line length.

Link: https://lkml.kernel.org/r/20231118023232.1409103-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20231118023232.1409103-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/ksm.h
mm/ksm.c

index c2dd786a30e1f7c39bd52b8ae4aa78625092eaf1..4643d5244e77cf4faaba1f256053021965e31676 100644 (file)
@@ -77,7 +77,7 @@ static inline void ksm_exit(struct mm_struct *mm)
  * but what if the vma was unmerged while the page was swapped out?
  */
 struct page *ksm_might_need_to_copy(struct page *page,
-                       struct vm_area_struct *vma, unsigned long address);
+                       struct vm_area_struct *vma, unsigned long addr);
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
@@ -130,7 +130,7 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 }
 
 static inline struct page *ksm_might_need_to_copy(struct page *page,
-                       struct vm_area_struct *vma, unsigned long address)
+                       struct vm_area_struct *vma, unsigned long addr)
 {
        return page;
 }
index 5d60d5385de67a38da131a585d0e765bb9af5912..b93389a3780e04bb59e58da985984b1052cc9d54 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2876,48 +2876,51 @@ void __ksm_exit(struct mm_struct *mm)
 }
 
 struct page *ksm_might_need_to_copy(struct page *page,
-                       struct vm_area_struct *vma, unsigned long address)
+                       struct vm_area_struct *vma, unsigned long addr)
 {
        struct folio *folio = page_folio(page);
        struct anon_vma *anon_vma = folio_anon_vma(folio);
-       struct page *new_page;
+       struct folio *new_folio;
 
-       if (PageKsm(page)) {
-               if (page_stable_node(page) &&
+       if (folio_test_large(folio))
+               return page;
+
+       if (folio_test_ksm(folio)) {
+               if (folio_stable_node(folio) &&
                    !(ksm_run & KSM_RUN_UNMERGE))
                        return page;    /* no need to copy it */
        } else if (!anon_vma) {
                return page;            /* no need to copy it */
-       } else if (page->index == linear_page_index(vma, address) &&
+       } else if (folio->index == linear_page_index(vma, addr) &&
                        anon_vma->root == vma->anon_vma->root) {
                return page;            /* still no need to copy it */
        }
        if (PageHWPoison(page))
                return ERR_PTR(-EHWPOISON);
-       if (!PageUptodate(page))
+       if (!folio_test_uptodate(folio))
                return page;            /* let do_swap_page report the error */
 
-       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-       if (new_page &&
-           mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
-               put_page(new_page);
-               new_page = NULL;
+       new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+       if (new_folio &&
+           mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
+               folio_put(new_folio);
+               new_folio = NULL;
        }
-       if (new_page) {
-               if (copy_mc_user_highpage(new_page, page, address, vma)) {
-                       put_page(new_page);
+       if (new_folio) {
+               if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) {
+                       folio_put(new_folio);
                        memory_failure_queue(page_to_pfn(page), 0);
                        return ERR_PTR(-EHWPOISON);
                }
-               SetPageDirty(new_page);
-               __SetPageUptodate(new_page);
-               __SetPageLocked(new_page);
+               folio_set_dirty(new_folio);
+               __folio_mark_uptodate(new_folio);
+               __folio_set_locked(new_folio);
 #ifdef CONFIG_SWAP
                count_vm_event(KSM_SWPIN_COPY);
 #endif
        }
 
-       return new_page;
+       return new_folio ? &new_folio->page : NULL;
 }
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)