ksm: add ksm zero pages for each process
author xu xin <xu.xin16@zte.com.cn>
Tue, 13 Jun 2023 03:09:38 +0000 (11:09 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 18 Aug 2023 17:12:10 +0000 (10:12 -0700)
As the number of KSM-placed zero pages is not included in ksm_merging_pages
per process when use_zero_pages is enabled, it's unclear how many pages
are actually merged by KSM. To let users accurately estimate their memory
demands when unsharing KSM zero pages, it's necessary to show the KSM zero
pages of each process. In addition, it helps users to know the actual KSM
profit, because KSM-placed zero pages also benefit from KSM.
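
For reference, with zero pages counted per process, the per-process
profit can be estimated roughly as follows (a sketch only; the exact
inclusion of zero pages in the profit calculation is handled separately):

  process profit ~= (ksm_merging_pages + ksm_zero_pages) * PAGE_SIZE -
                    ksm_rmap_items * sizeof(struct ksm_rmap_item)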

Since accurate unsharing of KSM-placed zero pages is now in place,
tracking the merging and unmerging of these empty pages is no longer
difficult.

Since we already have /proc/<pid>/ksm_stat, just add a 'ksm_zero_pages'
line to it.
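
With this change, reading the file would look like the following (the
values are illustrative only):

  $ cat /proc/<pid>/ksm_stat
  ksm_rmap_items 76
  ksm_zero_pages 53
  ksm_merging_pages 47
  ksm_process_profit 187648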

Link: https://lkml.kernel.org/r/20230613030938.185993-1-yang.yang29@zte.com.cn
Signed-off-by: xu xin <xu.xin16@zte.com.cn>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Xiaokai Ran <ran.xiaokai@zte.com.cn>
Reviewed-by: Yang Yang <yang.yang29@zte.com.cn>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Xuexin Jiang <jiang.xuexin@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/base.c
include/linux/ksm.h
include/linux/mm_types.h
mm/khugepaged.c
mm/ksm.c
mm/memory.c

index 05452c3b9872bd00aee5c739d5e865c46d892a2a..eb2e498e3b8de1cf569f75a411cd60d1f8d1cedf 100644 (file)
@@ -3207,6 +3207,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
        mm = get_task_mm(task);
        if (mm) {
                seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
+               seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
                seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
                seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
                mmput(mm);
index e80aa49009b262411ad197c5db05b1739da60639..c2dd786a30e1f7c39bd52b8ae4aa78625092eaf1 100644 (file)
@@ -35,10 +35,12 @@ void __ksm_exit(struct mm_struct *mm);
 
 extern unsigned long ksm_zero_pages;
 
-static inline void ksm_might_unmap_zero_page(pte_t pte)
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
-       if (is_ksm_zero_pte(pte))
+       if (is_ksm_zero_pte(pte)) {
                ksm_zero_pages--;
+               mm->ksm_zero_pages--;
+       }
 }
 
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -109,7 +111,7 @@ static inline void ksm_exit(struct mm_struct *mm)
 {
 }
 
-static inline void ksm_might_unmap_zero_page(pte_t pte)
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }
 
index 5e74ce4a28cd65eb90efa8142f1744fd92dd97a0..51d04c1847c11362a67d967ce893de17439d10e5 100644 (file)
@@ -812,7 +812,7 @@ struct mm_struct {
 #ifdef CONFIG_KSM
                /*
                 * Represent how many pages of this process are involved in KSM
-                * merging.
+                * merging (not including ksm_zero_pages).
                 */
                unsigned long ksm_merging_pages;
                /*
@@ -820,7 +820,12 @@ struct mm_struct {
                 * including merged and not merged.
                 */
                unsigned long ksm_rmap_items;
-#endif
+               /*
+                * Represent how many empty pages are merged with kernel zero
+                * pages when enabling KSM use_zero_pages.
+                */
+               unsigned long ksm_zero_pages;
+#endif /* CONFIG_KSM */
 #ifdef CONFIG_LRU_GEN
                struct {
                        /* this mm_struct is on lru_gen_mm_list */
index 419981dcc889db5c7583fbb2421d0f37d9c5d605..4b8b8673d5d9ffb15e5e08726e43de2f9c61b3e6 100644 (file)
@@ -710,7 +710,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
                                spin_lock(ptl);
                                ptep_clear(vma->vm_mm, address, _pte);
                                spin_unlock(ptl);
-                               ksm_might_unmap_zero_page(pteval);
+                               ksm_might_unmap_zero_page(vma->vm_mm, pteval);
                        }
                } else {
                        src_page = pte_page(pteval);
index e037d9aad691a0700a28e23296f9c8d831a97fe5..e1772081e8cbfa75c7011a60602cacdce820e7a0 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1233,6 +1233,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
                 */
                newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
                ksm_zero_pages++;
+               mm->ksm_zero_pages++;
                /*
                 * We're replacing an anonymous page with a zero page, which is
                 * not anonymous. We need to do proper accounting otherwise we
index c256da05bb5e41ae07597bfeda487c225aad2449..5f863b1a0edc64dcba23edfd8ac695c2b8b6a87b 100644 (file)
@@ -1434,7 +1434,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        zap_install_uffd_wp_if_needed(vma, addr, pte, details,
                                                      ptent);
                        if (unlikely(!page)) {
-                               ksm_might_unmap_zero_page(ptent);
+                               ksm_might_unmap_zero_page(mm, ptent);
                                continue;
                        }
 
@@ -3130,7 +3130,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                                inc_mm_counter(mm, MM_ANONPAGES);
                        }
                } else {
-                       ksm_might_unmap_zero_page(vmf->orig_pte);
+                       ksm_might_unmap_zero_page(mm, vmf->orig_pte);
                        inc_mm_counter(mm, MM_ANONPAGES);
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));