mm/ksm: use folio in stable_node_dup
author: Alex Shi (tencent) <alexs@kernel.org>
Thu, 11 Apr 2024 06:17:06 +0000 (14:17 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 6 May 2024 00:53:33 +0000 (17:53 -0700)
Convert stable_node_dup() to use ksm_get_folio()/folio_put() instead of get_ksm_page()/put_page(), saving 2 compound_head() calls.

Link: https://lkml.kernel.org/r/20240411061713.1847574-6-alexs@kernel.org
Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/ksm.c

index 1a7b130045898d448b7b8bc952529148c56c3572..654400f993fccdb2110c6ebf99318d7c3b9e2373 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1638,7 +1638,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
 {
        struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
        struct hlist_node *hlist_safe;
-       struct page *_tree_page, *tree_page = NULL;
+       struct folio *folio, *tree_folio = NULL;
        int nr = 0;
        int found_rmap_hlist_len;
 
@@ -1657,24 +1657,24 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
                 * We must walk all stable_node_dup to prune the stale
                 * stable nodes during lookup.
                 *
-                * get_ksm_page can drop the nodes from the
+                * ksm_get_folio can drop the nodes from the
                 * stable_node->hlist if they point to freed pages
                 * (that's why we do a _safe walk). The "dup"
                 * stable_node parameter itself will be freed from
                 * under us if it returns NULL.
                 */
-               _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
-               if (!_tree_page)
+               folio = ksm_get_folio(dup, GET_KSM_PAGE_NOLOCK);
+               if (!folio)
                        continue;
                nr += 1;
                if (is_page_sharing_candidate(dup)) {
                        if (!found ||
                            dup->rmap_hlist_len > found_rmap_hlist_len) {
                                if (found)
-                                       put_page(tree_page);
+                                       folio_put(tree_folio);
                                found = dup;
                                found_rmap_hlist_len = found->rmap_hlist_len;
-                               tree_page = _tree_page;
+                               tree_folio = folio;
 
                                /* skip put_page for found dup */
                                if (!prune_stale_stable_nodes)
@@ -1682,7 +1682,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
                                continue;
                        }
                }
-               put_page(_tree_page);
+               folio_put(folio);
        }
 
        if (found) {
@@ -1747,7 +1747,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
        }
 
        *_stable_node_dup = found;
-       return tree_page;
+       return &tree_folio->page;
 }
 
 static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,