mm/khugepaged: convert hpage_collapse_scan_pmd() to use folios
Author:     Vishal Moola (Oracle) <vishal.moola@gmail.com>
AuthorDate: Fri, 20 Oct 2023 18:33:28 +0000 (11:33 -0700)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 25 Oct 2023 23:47:14 +0000 (16:47 -0700)
Replaces 5 calls to compound_head() and removes 1385 bytes of kernel
text.
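
To see why the conversion pays off, here is a toy userspace sketch of
the pattern (illustration only: struct page, struct folio,
compound_head(), page_folio() and folio_nid() are real kernel names,
but the definitions below are simplified stand-ins, not the kernel's
actual layouts).  With bare pages, every helper must resolve the head
page of a compound page again; with a folio, the head is resolved once
at page_folio() time and every later access is direct:

#include <stdio.h>

/*
 * Toy model: a compound page is a run of struct pages whose tail
 * pages point back at the head; a folio is a type that can only
 * ever refer to the head page.
 */
struct page {
	struct page *head;	/* itself for a head page, the head for a tail */
	int nid;		/* NUMA node id, kept in the head page */
};

struct folio {
	struct page page;	/* a folio wraps the head page */
};

/* Old style: each helper resolves the head page again. */
static struct page *compound_head(struct page *page)
{
	return page->head;
}

static int page_to_nid(struct page *page)
{
	return compound_head(page)->nid;	/* repeated head lookup */
}

/* New style: resolve the head once, then use cheap folio helpers. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page->head;
}

static int folio_nid(struct folio *folio)
{
	return folio->page.nid;			/* direct access */
}

int main(void)
{
	struct page pages[4];

	for (int i = 0; i < 4; i++)
		pages[i].head = &pages[0];	/* one 4-page compound page */
	pages[0].nid = 1;

	/* Scanning code may start from an arbitrary (possibly tail) page... */
	struct folio *folio = page_folio(&pages[3]);

	/* ...but both styles agree on the head page's node. */
	printf("page_to_nid: %d, folio_nid: %d\n",
	       page_to_nid(&pages[3]), folio_nid(folio));
	return 0;
}

In the patch below the same one-time conversion happens at
page_folio(page), after which page_to_nid()/PageLRU()/PageLocked()/
PageAnon() and friends become their folio_* equivalents.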

Link: https://lkml.kernel.org/r/20231020183331.10770-3-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Rik van Riel <riel@surriel.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 500756604488a4cca96edb725e8ba71b761a37db..6c4b5af4337144ee1a90e9988448a6a027d07ea1 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1248,6 +1248,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
        int result = SCAN_FAIL, referenced = 0;
        int none_or_zero = 0, shared = 0;
        struct page *page = NULL;
+       struct folio *folio = NULL;
        unsigned long _address;
        spinlock_t *ptl;
        int node = NUMA_NO_NODE, unmapped = 0;
@@ -1334,29 +1335,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                        }
                }
 
-               page = compound_head(page);
-
+               folio = page_folio(page);
                /*
                 * Record which node the original page is from and save this
                 * information to cc->node_load[].
                 * Khugepaged will allocate hugepage from the node has the max
                 * hit record.
                 */
-               node = page_to_nid(page);
+               node = folio_nid(folio);
                if (hpage_collapse_scan_abort(node, cc)) {
                        result = SCAN_SCAN_ABORT;
                        goto out_unmap;
                }
                cc->node_load[node]++;
-               if (!PageLRU(page)) {
+               if (!folio_test_lru(folio)) {
                        result = SCAN_PAGE_LRU;
                        goto out_unmap;
                }
-               if (PageLocked(page)) {
+               if (folio_test_locked(folio)) {
                        result = SCAN_PAGE_LOCK;
                        goto out_unmap;
                }
-               if (!PageAnon(page)) {
+               if (!folio_test_anon(folio)) {
                        result = SCAN_PAGE_ANON;
                        goto out_unmap;
                }
@@ -1371,7 +1371,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                 * has excessive GUP pins (i.e. 512).  Anyway the same check
                 * will be done again later the risk seems low.
                 */
-               if (!is_refcount_suitable(page)) {
+               if (!is_refcount_suitable(&folio->page)) {
                        result = SCAN_PAGE_COUNT;
                        goto out_unmap;
                }
@@ -1381,8 +1381,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                 * enough young pte to justify collapsing the page
                 */
                if (cc->is_khugepaged &&
-                   (pte_young(pteval) || page_is_young(page) ||
-                    PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+                   (pte_young(pteval) || folio_test_young(folio) ||
+                    folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
                                                                     address)))
                        referenced++;
        }
@@ -1404,7 +1404,7 @@ out_unmap:
                *mmap_locked = false;
        }
 out:
-       trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
+       trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
                                     none_or_zero, result, unmapped);
        return result;
 }
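
A note on the two call sites that still pass &folio->page:
is_refcount_suitable() and the mm_khugepaged_scan_pmd tracepoint still
take a struct page *, and taking the address of the folio's embedded
page yields the head page without another compound_head() lookup.
Since page is the first member of struct folio, &folio->page is in
practice still NULL when the scan bails out before folio is assigned,
so the tracepoint's existing NULL-page case keeps working.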